repo: avoid copying/updating a dict on every `repo.__getitem__`...
Kyle Lippincott
r46036:4a0ccbec default
@@ -1,3526 +1,3524 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be
    unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

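# Illustrative usage sketch (editor's example, not part of the original
# file): these descriptors are applied to properties of
# ``localrepository`` later in this file, roughly like:
#
#     @repofilecache(b'bookmarks', b'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
#     @storecache(b'00changelog.i')
#     def changelog(self):
#         ...
#
# The computed value is cached on the unfiltered repo's __dict__ and is
# invalidated when the backing file's stat information changes on disk.
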
class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

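# Illustrative sketch (editor's example): callers can use this to peek at
# a cached property without forcing an expensive (re)computation, along
# the lines of:
#
#     cl, cached = isfilecached(repo, 'changelog')
#     if cached:
#         # reuse ``cl`` without re-reading the changelog from disk
#         ...
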
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

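# Illustrative sketch (editor's example): a method that must never see a
# filtered view of the repository is wrapped like this (``destroyed`` is
# decorated this way further down in this file):
#
#     @unfilteredmethod
#     def destroyed(self):
#         # ``self`` here is guaranteed to be the unfiltered repo
#         ...
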
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

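# Illustrative usage sketch (editor's example) of the command executor
# protocol; for a local peer the returned future is already resolved:
#
#     with peer.commandexecutor() as executor:
#         f = executor.callcommand(b'lookup', {b'key': b'tip'})
#         node = f.result()
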
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

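# Illustrative sketch (editor's example): an extension registers support
# for a custom requirement by adding a callback here, typically from its
# ``uisetup``:
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
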
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the ``requires`` file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements

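# Editor's note (illustrative): a typical ``.hg/requires`` from this era
# holds one feature name per line, for example:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
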
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    if shared:
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

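# Illustrative usage sketch (editor's example): callers normally reach
# this through ``hg.repository(ui, path)`` rather than calling it
# directly, but direct construction is roughly:
#
#     repo = makelocalrepository(ui, b'/path/to/repo')
#     unfi = repo.unfiltered()
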
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

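# Illustrative sketch (editor's example): an extension that wants extra
# per-repo config could wrap this function; ``hgrc-extra`` is a
# hypothetical file name:
#
#     def myloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', myloadhgrc)
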
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

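# Editor's summary (illustrative) of the requirement-to-store mapping
# implemented above:
#
#     {store, fncache, dotencode} -> fncachestore (dotencode=True)
#     {store, fncache}            -> fncachestore (dotencode=False)
#     {store}                     -> encodedstore
#     {} (very old repos)         -> basicstore
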
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])
953
953
954 if requirementsmod.NARROW_REQUIREMENT in requirements:
954 if requirementsmod.NARROW_REQUIREMENT in requirements:
955 options[b'enableellipsis'] = True
955 options[b'enableellipsis'] = True
956
956
957 if ui.configbool(b'experimental', b'rust.index'):
957 if ui.configbool(b'experimental', b'rust.index'):
958 options[b'rust.index'] = True
958 options[b'rust.index'] = True
959 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
959 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
960 options[b'persistent-nodemap'] = True
960 options[b'persistent-nodemap'] = True
961 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
961 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
962 options[b'persistent-nodemap.mmap'] = True
962 options[b'persistent-nodemap.mmap'] = True
963 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
963 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
964 options[b'persistent-nodemap.mode'] = epnm
964 options[b'persistent-nodemap.mode'] = epnm
965 if ui.configbool(b'devel', b'persistent-nodemap'):
965 if ui.configbool(b'devel', b'persistent-nodemap'):
966 options[b'devel-force-nodemap'] = True
966 options[b'devel-force-nodemap'] = True
967
967
968 return options
968 return options
969
969
970
970
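# Illustrative sketch (editor's addition, not part of the module): what the
# resolution above yields for a repository carrying a few common
# requirements. The requirement strings are the real on-disk names; ``ui``
# stands for any ``ui.ui`` instance with default configuration.
#
#   reqs = {b'revlogv1', b'generaldelta', b'sparserevlog'}
#   opts = resolverevlogstorevfsoptions(ui, reqs, set())
#   assert opts[b'revlogv1'] and opts[b'generaldelta']
#   assert opts[b'sparse-revlog'] is True
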
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
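
# Illustrative sketch (editor's addition): the derivation the comment above
# describes, reduced to its core. ``makelocalrepository()`` unwraps each
# lambda, calls the factory, and builds one type from the returned bases;
# the helper name ``_derive_repo_type`` is hypothetical.
#
#   def _derive_repo_type(requirements, features):
#       bases = []
#       for iface, mk in REPO_INTERFACES:
#           factory = mk()  # unwrap the lambda so wrapped functions are seen
#           bases.append(factory(requirements=requirements, features=features))
#       return type('derivedrepo', tuple(bases), {})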


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
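
    # Illustrative sketch (editor's addition): how an extension would opt one
    # of its own files out of the wlock check; ``myext.state`` is a made-up
    # file name.
    #
    #   localrepository._wlockfreeprefix.add(b'myext.state')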

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
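
    # Editor's note (illustrative): this ward, and the svfs ward below, are
    # only installed when ``devel.all-warnings`` or ``devel.check-locks`` is
    # enabled (see ``__init__`` above). An unlocked write to a guarded path
    # then shows up as a devel warning along the lines of:
    #
    #   devel-warn: write with no lock: "journal.dirstate" ...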

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
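
    # Illustrative sketch (editor's addition): requesting a specific view.
    # The "visible" view hides obsolete changesets and "served" additionally
    # hides secret ones; accessing a filtered-out revision raises
    # error.FilteredRepoLookupError.
    #
    #   served = repo.filtered(b'served')
    #   ctx = served[b'tip']  # tip of the served view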

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) an outside transaction updates the changelog to content B
        # 3) an outside transaction updates the bookmarks file, referring to
        #    content B
        # 4) the bookmarks file content is read and filtered against
        #    changelog A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks pointing to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not enough,
        # because at the time we build the content for `_bookmarks` in (4),
        # the changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cachestat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race (see issue6303)
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )
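
    # Illustrative sketch (editor's addition): the usual way an extension
    # uses this extension point, from its ``reposetup``; ``wrappedrepo`` is a
    # made-up name.
    #
    #   def reposetup(ui, repo):
    #       class wrappedrepo(repo.__class__):
    #           def _makedirstate(self):
    #               ds = super(wrappedrepo, self)._makedirstate()
    #               # decorate or replace ds here
    #               return ds
    #       repo.__class__ = wrappedrepo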

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
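
    # Illustrative sketch (editor's addition): narrowing an arbitrary matcher
    # to this repo's narrowspec while keeping explicitly named files visible
    # so callers can warn about them.
    #
    #   m = matchmod.match(repo.root, b'', [b'path:src/module.py'])
    #   m = repo.narrowmatch(m, includeexact=True)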

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }
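
    # Editor's note (illustrative): every spelling of the null revision hits
    # this table in ``__getitem__`` below, so none of these lookups needs to
    # touch the changelog:
    #
    #   repo[b'null'] == repo[nullrev] == repo[nullid]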

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
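
    # Illustrative sketch (editor's addition): the lookup forms accepted
    # above, each returning a context object.
    #
    #   repo[None]       # workingctx
    #   repo[0]          # changectx by local revision number
    #   repo[b'.']       # working directory parent
    #   repo[b'tip']     # repository tip
    #   repo[node]       # 20-byte binary node
    #   repo[hex(node)]  # 40-character hex node
    #   repo[0:3]        # list of changectx, skipping filtered revisions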

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
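
    # Illustrative sketch (editor's addition): %-formatting saves callers
    # from quoting values themselves (see ``revsetlang.formatspec``); %ld
    # takes a list of revision numbers, %s a string.
    #
    #   heads = repo.revs(b'%ld and head()', [5, 6, 7])
    #   anc = repo.revs(b'public() and ancestors(%s)', b'tip')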

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
1774
1772
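    # A usage sketch (illustrative only; ``some_rev`` is a hypothetical
    # integer revision): ``set()`` yields contexts instead of integers, and
    # ``anyrevs()`` unions several specs, here with user aliases expanded:
    #
    #     for ctx in repo.set(b'children(%d)', some_rev):
    #         ui.note(ctx.description())
    #     revs = repo.anyrevs([b'heads(.)', b'draft()'], user=True)
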
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

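    # A sketch of firing a custom hook (illustrative only; the hook name
    # ``myext-updated`` and the ``newlen`` argument are hypothetical; keyword
    # arguments surface to shell hooks as HG_* environment variables):
    #
    #     repo.hook(b'myext-updated', throw=False, newlen=len(repo))
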
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

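    # A usage sketch tying the tag accessors together (illustrative only;
    # ``some_node`` is a hypothetical binary node id):
    #
    #     node = repo.tags().get(b'release-1.0')   # tag -> node, or None
    #     kind = repo.tagtype(b'release-1.0')      # b'global', b'local' or None
    #     names = repo.nodetags(some_node)         # sorted tag names for a node
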
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

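    # A usage sketch (illustrative only): branchmap() answers "what are the
    # heads of each branch", while branchtip() answers the single-head
    # question without raising for unknown branches when asked not to:
    #
    #     heads = repo.branchmap()[b'default']                   # list of nodes
    #     tip = repo.branchtip(b'default', ignoremissing=True)   # node or None
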
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

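    # The [encode]/[decode] configuration sections feed _loadfilter() above.
    # A classic hgrc example from the configuration documentation (shown for
    # illustration; the ``pipe:`` prefix runs a command filter over stdio):
    #
    #     [encode]
    #     # uncompress gzip files on checkin
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     # recompress gzip files when writing to the working directory
    #     *.gz = pipe: gzip
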
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

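    # A usage sketch (illustrative only; ``ctx`` and ``path`` are
    # hypothetical): ``flags`` selects how the file is materialized
    # (b'' regular, b'x' executable, b'l' symlink), so copying a file out of
    # a context is just:
    #
    #     fctx = ctx[path]
    #     repo.wwrite(path, fctx.data(), fctx.flags())
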
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

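    # A usage sketch for the transaction API above (illustrative only;
    # ``touch_stuff`` is a hypothetical helper in extension code). Locks must
    # be held before opening the transaction, matching the devel.check-locks
    # assertion at the top of transaction(); the context manager aborts the
    # journal on exception and closes it otherwise:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             touch_stuff(repo, tr)
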
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

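    # A usage sketch (illustrative only): rollback() follows command-style
    # return codes, 0 on success and 1 when there is nothing to roll back, so
    # a cautious caller can preview first:
    #
    #     if repo.rollback(dryrun=True) == 0:   # prints what would be undone
    #         repo.rollback(dryrun=False)
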
2409 @unfilteredmethod # Until we get smarter cache management
2407 @unfilteredmethod # Until we get smarter cache management
2410 def _rollback(self, dryrun, force, dsguard):
2408 def _rollback(self, dryrun, force, dsguard):
2411 ui = self.ui
2409 ui = self.ui
2412 try:
2410 try:
2413 args = self.vfs.read(b'undo.desc').splitlines()
2411 args = self.vfs.read(b'undo.desc').splitlines()
2414 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2412 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2415 if len(args) >= 3:
2413 if len(args) >= 3:
2416 detail = args[2]
2414 detail = args[2]
2417 oldtip = oldlen - 1
2415 oldtip = oldlen - 1
2418
2416
2419 if detail and ui.verbose:
2417 if detail and ui.verbose:
2420 msg = _(
2418 msg = _(
2421 b'repository tip rolled back to revision %d'
2419 b'repository tip rolled back to revision %d'
2422 b' (undo %s: %s)\n'
2420 b' (undo %s: %s)\n'
2423 ) % (oldtip, desc, detail)
2421 ) % (oldtip, desc, detail)
2424 else:
2422 else:
2425 msg = _(
2423 msg = _(
2426 b'repository tip rolled back to revision %d (undo %s)\n'
2424 b'repository tip rolled back to revision %d (undo %s)\n'
2427 ) % (oldtip, desc)
2425 ) % (oldtip, desc)
2428 except IOError:
2426 except IOError:
2429 msg = _(b'rolling back unknown transaction\n')
2427 msg = _(b'rolling back unknown transaction\n')
2430 desc = None
2428 desc = None
2431
2429
2432 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2430 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2433 raise error.Abort(
2431 raise error.Abort(
2434 _(
2432 _(
2435 b'rollback of last commit while not checked out '
2433 b'rollback of last commit while not checked out '
2436 b'may lose data'
2434 b'may lose data'
2437 ),
2435 ),
2438 hint=_(b'use -f to force'),
2436 hint=_(b'use -f to force'),
2439 )
2437 )
2440
2438
2441 ui.status(msg)
2439 ui.status(msg)
2442 if dryrun:
2440 if dryrun:
2443 return 0
2441 return 0
2444
2442
2445 parents = self.dirstate.parents()
2443 parents = self.dirstate.parents()
2446 self.destroying()
2444 self.destroying()
2447 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2445 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2448 transaction.rollback(
2446 transaction.rollback(
2449 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2447 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2450 )
2448 )
2451 bookmarksvfs = bookmarks.bookmarksvfs(self)
2449 bookmarksvfs = bookmarks.bookmarksvfs(self)
2452 if bookmarksvfs.exists(b'undo.bookmarks'):
2450 if bookmarksvfs.exists(b'undo.bookmarks'):
2453 bookmarksvfs.rename(
2451 bookmarksvfs.rename(
2454 b'undo.bookmarks', b'bookmarks', checkambig=True
2452 b'undo.bookmarks', b'bookmarks', checkambig=True
2455 )
2453 )
2456 if self.svfs.exists(b'undo.phaseroots'):
2454 if self.svfs.exists(b'undo.phaseroots'):
2457 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2455 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2458 self.invalidate()
2456 self.invalidate()
2459
2457
2460 has_node = self.changelog.index.has_node
2458 has_node = self.changelog.index.has_node
2461 parentgone = any(not has_node(p) for p in parents)
2459 parentgone = any(not has_node(p) for p in parents)
2462 if parentgone:
2460 if parentgone:
2463 # prevent dirstateguard from overwriting already restored one
2461 # prevent dirstateguard from overwriting already restored one
2464 dsguard.close()
2462 dsguard.close()
2465
2463
2466 narrowspec.restorebackup(self, b'undo.narrowspec')
2464 narrowspec.restorebackup(self, b'undo.narrowspec')
2467 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2465 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2468 self.dirstate.restorebackup(None, b'undo.dirstate')
2466 self.dirstate.restorebackup(None, b'undo.dirstate')
2469 try:
2467 try:
2470 branch = self.vfs.read(b'undo.branch')
2468 branch = self.vfs.read(b'undo.branch')
2471 self.dirstate.setbranch(encoding.tolocal(branch))
2469 self.dirstate.setbranch(encoding.tolocal(branch))
2472 except IOError:
2470 except IOError:
2473 ui.warn(
2471 ui.warn(
2474 _(
2472 _(
2475 b'named branch could not be reset: '
2473 b'named branch could not be reset: '
2476 b'current branch is still \'%s\'\n'
2474 b'current branch is still \'%s\'\n'
2477 )
2475 )
2478 % self.dirstate.branch()
2476 % self.dirstate.branch()
2479 )
2477 )
2480
2478
2481 parents = tuple([p.rev() for p in self[None].parents()])
2479 parents = tuple([p.rev() for p in self[None].parents()])
2482 if len(parents) > 1:
2480 if len(parents) > 1:
2483 ui.status(
2481 ui.status(
2484 _(
2482 _(
2485 b'working directory now based on '
2483 b'working directory now based on '
2486 b'revisions %d and %d\n'
2484 b'revisions %d and %d\n'
2487 )
2485 )
2488 % parents
2486 % parents
2489 )
2487 )
2490 else:
2488 else:
2491 ui.status(
2489 ui.status(
2492 _(b'working directory now based on revision %d\n') % parents
2490 _(b'working directory now based on revision %d\n') % parents
2493 )
2491 )
2494 mergestatemod.mergestate.clean(self, self[b'.'].node())
2492 mergestatemod.mergestate.clean(self, self[b'.'].node())
2495
2493
2496 # TODO: if we know which new heads may result from this rollback, pass
2494 # TODO: if we know which new heads may result from this rollback, pass
2497 # them to destroy(), which will prevent the branchhead cache from being
2495 # them to destroy(), which will prevent the branchhead cache from being
2498 # invalidated.
2496 # invalidated.
2499 self.destroyed()
2497 self.destroyed()
2500 return 0
2498 return 0
2501
2499
2502 def _buildcacheupdater(self, newtransaction):
2500 def _buildcacheupdater(self, newtransaction):
2503 """called during transaction to build the callback updating cache
2501 """called during transaction to build the callback updating cache
2504
2502
2505 Lives on the repository to help extension who might want to augment
2503 Lives on the repository to help extension who might want to augment
2506 this logic. For this purpose, the created transaction is passed to the
2504 this logic. For this purpose, the created transaction is passed to the
2507 method.
2505 method.
2508 """
2506 """
2509 # we must avoid cyclic reference between repo and transaction.
2507 # we must avoid cyclic reference between repo and transaction.
2510 reporef = weakref.ref(self)
2508 reporef = weakref.ref(self)
2511
2509
2512 def updater(tr):
2510 def updater(tr):
2513 repo = reporef()
2511 repo = reporef()
2514 repo.updatecaches(tr)
2512 repo.updatecaches(tr)
2515
2513
2516 return updater
2514 return updater
2517
2515
2518 @unfilteredmethod
2516 @unfilteredmethod
2519 def updatecaches(self, tr=None, full=False):
2517 def updatecaches(self, tr=None, full=False):
2520 """warm appropriate caches
2518 """warm appropriate caches
2521
2519
2522 If this function is called after a transaction closed. The transaction
2520 If this function is called after a transaction closed. The transaction
2523 will be available in the 'tr' argument. This can be used to selectively
2521 will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

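    # Example (illustrative sketch, not part of upstream code): a caller
    # that has just finished writing a transaction could warm every cache,
    # including the lazily-loaded ones, before serving readers:
    #
    #     with repo.lock():
    #         with repo.transaction(b'import-data') as tr:
    #             ...  # add changesets
    #     repo.updatecaches(full=True)
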
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

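    # Example (illustrative sketch, not upstream code): an extension that
    # keeps its own in-memory cache can wrap invalidateall() so its cache
    # is dropped whenever the repo rereads outside changes. The attribute
    # name `_myextcache` is hypothetical.
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _wrapinvalidateall(orig, repo):
    #         repo.__dict__.pop('_myextcache', None)  # drop our cache
    #         return orig(repo)
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(
    #             localrepo.localrepository, 'invalidateall', _wrapinvalidateall
    #         )
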
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

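    # Example (illustrative sketch, not upstream code): deferring work until
    # every lock is released, as the commit hook further below does. The
    # callback receives a boolean indicating whether the locked section
    # completed successfully.
    #
    #     def _notify(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(_notify)
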
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

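    # Example (illustrative sketch, not upstream code): the documented lock
    # ordering -- always take 'wlock' before 'lock' when both are needed:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'my-change') as tr:
    #             ...  # mutate the store under both locks
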
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

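    # Example (illustrative sketch, not upstream code): committing a subset
    # of files programmatically. The path and user are hypothetical.
    #
    #     from mercurial import match as matchmod
    #
    #     m = matchmod.exact([b'src/foo.py'])
    #     node = repo.commit(
    #         text=b'fix foo', user=b'alice <alice@example.com>', match=m
    #     )
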
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

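    # Example (illustrative sketch, not upstream code): comparing the
    # working directory against its first parent and listing the results:
    #
    #     st = repo.status(unknown=True)
    #     for f in st.modified:
    #         repo.ui.write(b'M %s\n' % f)
    #     for f in st.unknown:
    #         repo.ui.write(b'? %s\n' % f)
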
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

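    # Example (illustrative sketch, not upstream code): a post-status
    # callback that logs how many files were modified. Note it reaches the
    # repo through wctx.repo(), as the docstring above requires.
    #
    #     def _afterstatus(wctx, status):
    #         wctx.repo().ui.debug(
    #             b'%d modified files\n' % len(status.modified)
    #         )
    #
    #     repo.addpostdsstatus(_afterstatus)
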
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called before pushing
        changesets, with a pushop exposing repo, remote and outgoing
        attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

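    # Example (illustrative sketch, not upstream code): rejecting bookmark
    # deletion server-side via the prepushkey hook fired above. In hgrc:
    #
    #     [hooks]
    #     prepushkey.nodelete = python:myhooks.nobookmarkdelete
    #
    # where myhooks.py (hypothetical) contains:
    #
    #     def nobookmarkdelete(ui, repo, namespace=None, new=None, **kwargs):
    #         if namespace == b'bookmarks' and not new:
    #             ui.warn(b'bookmark deletion is disabled\n')
    #             return True  # a truthy return aborts the push
    #         return False
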
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


3233
3231
3234 def newreporequirements(ui, createopts):
3232 def newreporequirements(ui, createopts):
3235 """Determine the set of requirements for a new local repository.
3233 """Determine the set of requirements for a new local repository.
3236
3234
3237 Extensions can wrap this function to specify custom requirements for
3235 Extensions can wrap this function to specify custom requirements for
3238 new repositories.
3236 new repositories.
3239 """
3237 """
3240 # If the repo is being created from a shared repository, we copy
3238 # If the repo is being created from a shared repository, we copy
3241 # its requirements.
3239 # its requirements.
3242 if b'sharedrepo' in createopts:
3240 if b'sharedrepo' in createopts:
3243 requirements = set(createopts[b'sharedrepo'].requirements)
3241 requirements = set(createopts[b'sharedrepo'].requirements)
3244 if createopts.get(b'sharedrelative'):
3242 if createopts.get(b'sharedrelative'):
3245 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3243 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3246 else:
3244 else:
3247 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3245 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3248
3246
3249 return requirements
3247 return requirements
3250
3248
3251 if b'backend' not in createopts:
3249 if b'backend' not in createopts:
3252 raise error.ProgrammingError(
3250 raise error.ProgrammingError(
3253 b'backend key not present in createopts; '
3251 b'backend key not present in createopts; '
3254 b'was defaultcreateopts() called?'
3252 b'was defaultcreateopts() called?'
3255 )
3253 )
3256
3254
3257 if createopts[b'backend'] != b'revlogv1':
3255 if createopts[b'backend'] != b'revlogv1':
3258 raise error.Abort(
3256 raise error.Abort(
3259 _(
3257 _(
3260 b'unable to determine repository requirements for '
3258 b'unable to determine repository requirements for '
3261 b'storage backend: %s'
3259 b'storage backend: %s'
3262 )
3260 )
3263 % createopts[b'backend']
3261 % createopts[b'backend']
3264 )
3262 )
3265
3263
3266 requirements = {b'revlogv1'}
3264 requirements = {b'revlogv1'}
3267 if ui.configbool(b'format', b'usestore'):
3265 if ui.configbool(b'format', b'usestore'):
3268 requirements.add(b'store')
3266 requirements.add(b'store')
3269 if ui.configbool(b'format', b'usefncache'):
3267 if ui.configbool(b'format', b'usefncache'):
3270 requirements.add(b'fncache')
3268 requirements.add(b'fncache')
3271 if ui.configbool(b'format', b'dotencode'):
3269 if ui.configbool(b'format', b'dotencode'):
3272 requirements.add(b'dotencode')
3270 requirements.add(b'dotencode')
3273
3271
3274 compengines = ui.configlist(b'format', b'revlog-compression')
3272 compengines = ui.configlist(b'format', b'revlog-compression')
3275 for compengine in compengines:
3273 for compengine in compengines:
3276 if compengine in util.compengines:
3274 if compengine in util.compengines:
3277 break
3275 break
3278 else:
3276 else:
3279 raise error.Abort(
3277 raise error.Abort(
3280 _(
3278 _(
3281 b'compression engines %s defined by '
3279 b'compression engines %s defined by '
3282 b'format.revlog-compression not available'
3280 b'format.revlog-compression not available'
3283 )
3281 )
3284 % b', '.join(b'"%s"' % e for e in compengines),
3282 % b', '.join(b'"%s"' % e for e in compengines),
3285 hint=_(
3283 hint=_(
3286 b'run "hg debuginstall" to list available '
3284 b'run "hg debuginstall" to list available '
3287 b'compression engines'
3285 b'compression engines'
3288 ),
3286 ),
3289 )
3287 )
3290
3288
3291 # zlib is the historical default and doesn't need an explicit requirement.
3289 # zlib is the historical default and doesn't need an explicit requirement.
3292 if compengine == b'zstd':
3290 if compengine == b'zstd':
3293 requirements.add(b'revlog-compression-zstd')
3291 requirements.add(b'revlog-compression-zstd')
3294 elif compengine != b'zlib':
3292 elif compengine != b'zlib':
3295 requirements.add(b'exp-compression-%s' % compengine)
3293 requirements.add(b'exp-compression-%s' % compengine)
3296
3294
3297 if scmutil.gdinitconfig(ui):
3295 if scmutil.gdinitconfig(ui):
3298 requirements.add(b'generaldelta')
3296 requirements.add(b'generaldelta')
3299 if ui.configbool(b'format', b'sparse-revlog'):
3297 if ui.configbool(b'format', b'sparse-revlog'):
3300 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3298 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3301
3299
3302 # experimental config: format.exp-use-side-data
3300 # experimental config: format.exp-use-side-data
3303 if ui.configbool(b'format', b'exp-use-side-data'):
3301 if ui.configbool(b'format', b'exp-use-side-data'):
3304 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3302 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3305 # experimental config: format.exp-use-copies-side-data-changeset
3303 # experimental config: format.exp-use-copies-side-data-changeset
3306 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3304 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3307 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3305 requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
3308 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3306 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3309 if ui.configbool(b'experimental', b'treemanifest'):
3307 if ui.configbool(b'experimental', b'treemanifest'):
3310 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3308 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3311
3309
3312 revlogv2 = ui.config(b'experimental', b'revlogv2')
3310 revlogv2 = ui.config(b'experimental', b'revlogv2')
3313 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3311 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3314 requirements.remove(b'revlogv1')
3312 requirements.remove(b'revlogv1')
3315 # generaldelta is implied by revlogv2.
3313 # generaldelta is implied by revlogv2.
3316 requirements.discard(b'generaldelta')
3314 requirements.discard(b'generaldelta')
3317 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3315 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3318 # experimental config: format.internal-phase
3316 # experimental config: format.internal-phase
3319 if ui.configbool(b'format', b'internal-phase'):
3317 if ui.configbool(b'format', b'internal-phase'):
3320 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3318 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3321
3319
3322 if createopts.get(b'narrowfiles'):
3320 if createopts.get(b'narrowfiles'):
3323 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3321 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3324
3322
3325 if createopts.get(b'lfs'):
3323 if createopts.get(b'lfs'):
3326 requirements.add(b'lfs')
3324 requirements.add(b'lfs')
3327
3325
3328 if ui.configbool(b'format', b'bookmarks-in-store'):
3326 if ui.configbool(b'format', b'bookmarks-in-store'):
3329 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3327 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3330
3328
3331 if ui.configbool(b'format', b'use-persistent-nodemap'):
3329 if ui.configbool(b'format', b'use-persistent-nodemap'):
3332 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3330 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3333
3331
3334 return requirements
3332 return requirements
3335
3333
3336
3334
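# Example (illustrative sketch, not upstream code): an extension adding its
# own requirement to newly created repositories, as the docstring above
# suggests. The requirement name is hypothetical.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myextension-storage')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )

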
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which needs to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


3397 def createrepository(ui, path, createopts=None):
3395 def createrepository(ui, path, createopts=None):
3398 """Create a new repository in a vfs.
3396 """Create a new repository in a vfs.
3399
3397
3400 ``path`` path to the new repo's working directory.
3398 ``path`` path to the new repo's working directory.
3401 ``createopts`` options for the new repository.
3399 ``createopts`` options for the new repository.
3402
3400
3403 The following keys for ``createopts`` are recognized:
3401 The following keys for ``createopts`` are recognized:
3404
3402
3405 backend
3403 backend
3406 The storage backend to use.
3404 The storage backend to use.
3407 lfs
3405 lfs
3408 Repository will be created with ``lfs`` requirement. The lfs extension
3406 Repository will be created with ``lfs`` requirement. The lfs extension
3409 will automatically be loaded when the repository is accessed.
3407 will automatically be loaded when the repository is accessed.
3410 narrowfiles
3408 narrowfiles
3411 Set up repository to support narrow file storage.
3409 Set up repository to support narrow file storage.
3412 sharedrepo
3410 sharedrepo
3413 Repository object from which storage should be shared.
3411 Repository object from which storage should be shared.
3414 sharedrelative
3412 sharedrelative
3415 Boolean indicating if the path to the shared repo should be
3413 Boolean indicating if the path to the shared repo should be
3416 stored as relative. By default, the pointer to the "parent" repo
3414 stored as relative. By default, the pointer to the "parent" repo
3417 is stored as an absolute path.
3415 is stored as an absolute path.
3418 shareditems
3416 shareditems
3419 Set of items to share to the new repository (in addition to storage).
3417 Set of items to share to the new repository (in addition to storage).
3420 shallowfilestore
3418 shallowfilestore
3421 Indicates that storage for files should be shallow (not all ancestor
3419 Indicates that storage for files should be shallow (not all ancestor
3422 revisions are known).
3420 revisions are known).
3423 """
3421 """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )
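                # For instance (an illustration consistent with the comment
                # above): on Windows, os.path.relpath(b'C:\\repo',
                # b'D:\\other') raises ValueError, since no relative path
                # exists between different drives.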

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )
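        # The first four bytes of a revlog index pack flags into the high
        # 16 bits and the version into the low 16 bits, so b'\0\0\0\2'
        # decodes as version 2 with no flags. A sketch of that decoding
        # (assuming only the standard struct module):
        #
        #   >>> import struct
        #   >>> struct.unpack('>I', b'\x00\x00\x00\x02')[0] & 0xFFFF
        #   2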

    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

        if createopts.get(b'shareditems'):
            shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
            hgvfs.write(b'shared', shared)
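
# A minimal usage sketch (hypothetical, not part of this module): with a
# loaded ui object, creating a fresh repository with the lfs requirement
# might look like:
#
#   from mercurial import ui as uimod, localrepo
#
#   baseui = uimod.ui.load()
#   localrepo.createrepository(
#       baseui, b'/tmp/newrepo', createopts={b'lfs': True}
#   )
#
# Paths and createopts keys are bytes, matching the b'' convention used
# throughout this file.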


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
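
# The class-swap trick above is plain Python and can be demonstrated in
# isolation. A minimal standalone sketch (illustrative names only, not part
# of this module):
#
#   class _Poisoned(object):
#       def __getattribute__(self, item):
#           raise RuntimeError('object has been poisoned')
#
#   class Victim(object):
#       def hello(self):
#           return 'hi'
#
#   v = Victim()
#   v.hello()                                      # 'hi'
#   object.__setattr__(v, '__class__', _Poisoned)
#   v.hello()                                      # raises RuntimeError
#
# Swapping __class__ is legal here because both classes share a compatible
# instance layout, and object.__setattr__ bypasses any __setattr__ override
# defined on the instance's original class.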