localrepo: warn if we are writing to cache without a lock...
Pulkit Goyal
r46003:324ad3e7 default
@@ -1,3521 +1,3526 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


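# Editor's illustrative sketch (not part of the original changeset): how a
# caller might use isfilecached() to peek at a filecache-ed property without
# forcing it to be computed. The helper name below is hypothetical;
# b'changelog' is used only as a plausible property name.
def _example_peek_changelog(repo):
    # cached is False when the property has never been accessed, so this
    # probe does not trigger a filesystem stat or a cache fill.
    cl, cached = isfilecached(repo, b'changelog')
    if cached:
        return cl
    return None

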
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


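# Editor's illustrative sketch (not part of the original changeset): applying
# unfilteredmethod to a repository-like class. The class and method names are
# hypothetical; only unfiltered() is assumed, matching the decorator above.
class _examplerepo(object):
    def unfiltered(self):
        # A real repo would return the view-less repository here.
        return self

    @unfilteredmethod
    def destroyed(self):
        # ``self`` inside the body is guaranteed to be the unfiltered repo.
        pass

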
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


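# Editor's illustrative sketch (not part of the original changeset): driving
# localcommandexecutor through its context-manager protocol. ``peer`` is
# assumed to be a localpeer instance; b'heads' mirrors the modern capability
# set defined above.
def _example_call_heads(peer):
    with localcommandexecutor(peer) as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()
    # callcommand() returned an already-resolved future, so result() does
    # not block.
    return f.result()

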
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


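# Editor's illustrative sketch (not part of the original changeset): how an
# extension might register a featuresetupfuncs entry. The function name and
# the requirement string below are hypothetical. Note that the registration
# only has an effect when done from a loaded extension module, since
# gathersupportedrequirements() checks fn.__module__ against loaded
# extensions.
def _examplefeaturesetup(ui, supported):
    # Advertise that this (hypothetical) extension can open repositories
    # carrying the b'exp-example-requirement' requirement.
    supported.add(b'exp-example-requirement')


# An extension would typically register at module load time:
#   featuresetupfuncs.add(_examplefeaturesetup)

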
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements


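# Editor's note (not part of the original changeset): the ``requires`` file
# parsed above is a plain newline-delimited list. For example, a repository
# whose .hg/requires contains:
#
#   revlogv1
#   generaldelta
#   store
#   fncache
#   dotencode
#
# would make _readrequires(hgvfs, True) return the set
# {b'revlogv1', b'generaldelta', b'store', b'fncache', b'dotencode'}.

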
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we will
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    if shared:
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False


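# Editor's illustrative sketch (not part of the original changeset): an
# extension monkeypatching loadhgrc, as the docstring above suggests, via the
# standard extensions.wrapfunction() mechanism. The wrapper name and the
# b'hgrc-extra' file are hypothetical; ``localrepo`` refers to this module as
# imported from the extension.
#
#   from mercurial import extensions, localrepo
#
#   def _wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#       loaded = orig(ui, wdirvfs, hgvfs, requirements)
#       # Pull in an extra, per-repo config source (hypothetical path).
#       try:
#           ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#           loaded = True
#       except IOError:
#           pass
#       return loaded
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'loadhgrc', _wrappedloadhgrc)

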
706 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
706 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
707 """Perform additional actions after .hg/hgrc is loaded.
707 """Perform additional actions after .hg/hgrc is loaded.
708
708
709 This function is called during repository loading immediately after
709 This function is called during repository loading immediately after
710 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
710 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
711
711
712 The function can be used to validate configs, automatically add
712 The function can be used to validate configs, automatically add
713 options (including extensions) based on requirements, etc.
713 options (including extensions) based on requirements, etc.
714 """
714 """
715
715
716 # Map of requirements to list of extensions to load automatically when
716 # Map of requirements to list of extensions to load automatically when
717 # requirement is present.
717 # requirement is present.
718 autoextensions = {
718 autoextensions = {
719 b'git': [b'git'],
719 b'git': [b'git'],
720 b'largefiles': [b'largefiles'],
720 b'largefiles': [b'largefiles'],
721 b'lfs': [b'lfs'],
721 b'lfs': [b'lfs'],
722 }
722 }
723
723
724 for requirement, names in sorted(autoextensions.items()):
724 for requirement, names in sorted(autoextensions.items()):
725 if requirement not in requirements:
725 if requirement not in requirements:
726 continue
726 continue
727
727
728 for name in names:
728 for name in names:
729 if not ui.hasconfig(b'extensions', name):
729 if not ui.hasconfig(b'extensions', name):
730 ui.setconfig(b'extensions', name, b'', source=b'autoload')
730 ui.setconfig(b'extensions', name, b'', source=b'autoload')
731
731
732
732
733 def gathersupportedrequirements(ui):
733 def gathersupportedrequirements(ui):
734 """Determine the complete set of recognized requirements."""
734 """Determine the complete set of recognized requirements."""
735 # Start with all requirements supported by this file.
735 # Start with all requirements supported by this file.
736 supported = set(localrepository._basesupported)
736 supported = set(localrepository._basesupported)
737
737
738 # Execute ``featuresetupfuncs`` entries if they belong to an extension
738 # Execute ``featuresetupfuncs`` entries if they belong to an extension
739 # relevant to this ui instance.
739 # relevant to this ui instance.
740 modules = {m.__name__ for n, m in extensions.extensions(ui)}
740 modules = {m.__name__ for n, m in extensions.extensions(ui)}
741
741
742 for fn in featuresetupfuncs:
742 for fn in featuresetupfuncs:
743 if fn.__module__ in modules:
743 if fn.__module__ in modules:
744 fn(ui, supported)
744 fn(ui, supported)
745
745
746 # Add derived requirements from registered compression engines.
746 # Add derived requirements from registered compression engines.
747 for name in util.compengines:
747 for name in util.compengines:
748 engine = util.compengines[name]
748 engine = util.compengines[name]
749 if engine.available() and engine.revlogheader():
749 if engine.available() and engine.revlogheader():
750 supported.add(b'exp-compression-%s' % name)
750 supported.add(b'exp-compression-%s' % name)
751 if engine.name() == b'zstd':
751 if engine.name() == b'zstd':
752 supported.add(b'revlog-compression-zstd')
752 supported.add(b'revlog-compression-zstd')
753
753
754 return supported
754 return supported
755
755
756
756
757 def ensurerequirementsrecognized(requirements, supported):
757 def ensurerequirementsrecognized(requirements, supported):
758 """Validate that a set of local requirements is recognized.
758 """Validate that a set of local requirements is recognized.
759
759
760 Receives a set of requirements. Raises an ``error.RepoError`` if there
760 Receives a set of requirements. Raises an ``error.RepoError`` if there
761 exists any requirement in that set that currently loaded code doesn't
761 exists any requirement in that set that currently loaded code doesn't
762 recognize.
762 recognize.
763
763
764 Returns a set of supported requirements.
764 Returns a set of supported requirements.
765 """
765 """
766 missing = set()
766 missing = set()
767
767
768 for requirement in requirements:
768 for requirement in requirements:
769 if requirement in supported:
769 if requirement in supported:
770 continue
770 continue
771
771
772 if not requirement or not requirement[0:1].isalnum():
772 if not requirement or not requirement[0:1].isalnum():
773 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
773 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
774
774
775 missing.add(requirement)
775 missing.add(requirement)
776
776
777 if missing:
777 if missing:
778 raise error.RequirementError(
778 raise error.RequirementError(
779 _(b'repository requires features unknown to this Mercurial: %s')
779 _(b'repository requires features unknown to this Mercurial: %s')
780 % b' '.join(sorted(missing)),
780 % b' '.join(sorted(missing)),
781 hint=_(
781 hint=_(
782 b'see https://mercurial-scm.org/wiki/MissingRequirement '
782 b'see https://mercurial-scm.org/wiki/MissingRequirement '
783 b'for more information'
783 b'for more information'
784 ),
784 ),
785 )
785 )
786
786
787
787
788 def ensurerequirementscompatible(ui, requirements):
788 def ensurerequirementscompatible(ui, requirements):
789 """Validates that a set of recognized requirements is mutually compatible.
789 """Validates that a set of recognized requirements is mutually compatible.
790
790
791 Some requirements may not be compatible with others or require
791 Some requirements may not be compatible with others or require
792 config options that aren't enabled. This function is called during
792 config options that aren't enabled. This function is called during
793 repository opening to ensure that the set of requirements needed
793 repository opening to ensure that the set of requirements needed
794 to open a repository is sane and compatible with config options.
794 to open a repository is sane and compatible with config options.
795
795
796 Extensions can monkeypatch this function to perform additional
796 Extensions can monkeypatch this function to perform additional
797 checking.
797 checking.
798
798
799 ``error.RepoError`` should be raised on failure.
799 ``error.RepoError`` should be raised on failure.
800 """
800 """
801 if (
801 if (
802 requirementsmod.SPARSE_REQUIREMENT in requirements
802 requirementsmod.SPARSE_REQUIREMENT in requirements
803 and not sparse.enabled
803 and not sparse.enabled
804 ):
804 ):
805 raise error.RepoError(
805 raise error.RepoError(
806 _(
806 _(
807 b'repository is using sparse feature but '
807 b'repository is using sparse feature but '
808 b'sparse is not enabled; enable the '
808 b'sparse is not enabled; enable the '
809 b'"sparse" extensions to access'
809 b'"sparse" extensions to access'
810 )
810 )
811 )
811 )
812
812
813
813
814 def makestore(requirements, path, vfstype):
814 def makestore(requirements, path, vfstype):
815 """Construct a storage object for a repository."""
815 """Construct a storage object for a repository."""
816 if b'store' in requirements:
816 if b'store' in requirements:
817 if b'fncache' in requirements:
817 if b'fncache' in requirements:
818 return storemod.fncachestore(
818 return storemod.fncachestore(
819 path, vfstype, b'dotencode' in requirements
819 path, vfstype, b'dotencode' in requirements
820 )
820 )
821
821
822 return storemod.encodedstore(path, vfstype)
822 return storemod.encodedstore(path, vfstype)
823
823
824 return storemod.basicstore(path, vfstype)
824 return storemod.basicstore(path, vfstype)
825
825
826
826
827 def resolvestorevfsoptions(ui, requirements, features):
827 def resolvestorevfsoptions(ui, requirements, features):
828 """Resolve the options to pass to the store vfs opener.
828 """Resolve the options to pass to the store vfs opener.
829
829
830 The returned dict is used to influence behavior of the storage layer.
830 The returned dict is used to influence behavior of the storage layer.
831 """
831 """
832 options = {}
832 options = {}
833
833
834 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
834 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
835 options[b'treemanifest'] = True
835 options[b'treemanifest'] = True
836
836
837 # experimental config: format.manifestcachesize
837 # experimental config: format.manifestcachesize
838 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
838 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
839 if manifestcachesize is not None:
839 if manifestcachesize is not None:
840 options[b'manifestcachesize'] = manifestcachesize
840 options[b'manifestcachesize'] = manifestcachesize
841
841
842 # In the absence of another requirement superseding a revlog-related
842 # In the absence of another requirement superseding a revlog-related
843 # requirement, we have to assume the repo is using revlog version 0.
843 # requirement, we have to assume the repo is using revlog version 0.
844 # This revlog format is super old and we don't bother trying to parse
844 # This revlog format is super old and we don't bother trying to parse
845 # opener options for it because those options wouldn't do anything
845 # opener options for it because those options wouldn't do anything
846 # meaningful on such old repos.
846 # meaningful on such old repos.
847 if (
847 if (
848 b'revlogv1' in requirements
848 b'revlogv1' in requirements
849 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
849 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
850 ):
850 ):
851 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
851 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
852 else: # explicitly mark repo as using revlogv0
852 else: # explicitly mark repo as using revlogv0
853 options[b'revlogv0'] = True
853 options[b'revlogv0'] = True
854
854
855 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
855 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
856 options[b'copies-storage'] = b'changeset-sidedata'
856 options[b'copies-storage'] = b'changeset-sidedata'
857 else:
857 else:
858 writecopiesto = ui.config(b'experimental', b'copies.write-to')
858 writecopiesto = ui.config(b'experimental', b'copies.write-to')
859 copiesextramode = (b'changeset-only', b'compatibility')
859 copiesextramode = (b'changeset-only', b'compatibility')
860 if writecopiesto in copiesextramode:
860 if writecopiesto in copiesextramode:
861 options[b'copies-storage'] = b'extra'
861 options[b'copies-storage'] = b'extra'
862
862
863 return options
863 return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
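
# A hedged illustration, not part of the module, of the compression
# requirement scan in ``resolverevlogstorevfsoptions`` above: the engine
# name is whatever follows the second dash, and when several matching
# requirements coexist the last one scanned wins.
#
#   >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
#   b'zstd'
#   >>> b'exp-compression-none'.split(b'-', 2)[2]
#   b'none'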


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
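
# Hedged usage sketch for ``makefilestorage()`` above: the returned class
# depends only on whether the narrow requirement is present (``feats`` is an
# invented local):
#
#   feats = set()
#   makefilestorage({requirementsmod.NARROW_REQUIREMENT}, feats)
#   # -> revlognarrowfilestorage
#   makefilestorage(set(), feats)
#   # -> revlogfilestorage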


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
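
# A rough sketch, under stated assumptions, of how ``makelocalrepository()``
# can consume this table to build the final class; the real factory passes
# many more keyword arguments than shown here:
#
#   bases = []
#   for iface, fn in REPO_INTERFACES:
#       typ = fn()(requirements=requirements, features=features)
#       bases.append(typ)
#   cls = type('derivedrepo', tuple(bases), {})  # final repository type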


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
1098 """Create a new local repository instance.
1098 """Create a new local repository instance.
1099
1099
1100 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1100 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1101 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1101 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1102 object.
1102 object.
1103
1103
1104 Arguments:
1104 Arguments:
1105
1105
1106 baseui
1106 baseui
1107 ``ui.ui`` instance that ``ui`` argument was based off of.
1107 ``ui.ui`` instance that ``ui`` argument was based off of.
1108
1108
1109 ui
1109 ui
1110 ``ui.ui`` instance for use by the repository.
1110 ``ui.ui`` instance for use by the repository.
1111
1111
1112 origroot
1112 origroot
1113 ``bytes`` path to working directory root of this repository.
1113 ``bytes`` path to working directory root of this repository.
1114
1114
1115 wdirvfs
1115 wdirvfs
1116 ``vfs.vfs`` rooted at the working directory.
1116 ``vfs.vfs`` rooted at the working directory.
1117
1117
1118 hgvfs
1118 hgvfs
1119 ``vfs.vfs`` rooted at .hg/
1119 ``vfs.vfs`` rooted at .hg/
1120
1120
1121 requirements
1121 requirements
1122 ``set`` of bytestrings representing repository opening requirements.
1122 ``set`` of bytestrings representing repository opening requirements.
1123
1123
1124 supportedrequirements
1124 supportedrequirements
1125 ``set`` of bytestrings representing repository requirements that we
1125 ``set`` of bytestrings representing repository requirements that we
1126 know how to open. May be a supetset of ``requirements``.
1126 know how to open. May be a supetset of ``requirements``.
1127
1127
1128 sharedpath
1128 sharedpath
1129 ``bytes`` Defining path to storage base directory. Points to a
1129 ``bytes`` Defining path to storage base directory. Points to a
1130 ``.hg/`` directory somewhere.
1130 ``.hg/`` directory somewhere.
1131
1131
1132 store
1132 store
1133 ``store.basicstore`` (or derived) instance providing access to
1133 ``store.basicstore`` (or derived) instance providing access to
1134 versioned storage.
1134 versioned storage.
1135
1135
1136 cachevfs
1136 cachevfs
1137 ``vfs.vfs`` used for cache files.
1137 ``vfs.vfs`` used for cache files.
1138
1138
1139 wcachevfs
1139 wcachevfs
1140 ``vfs.vfs`` used for cache files related to the working copy.
1140 ``vfs.vfs`` used for cache files related to the working copy.
1141
1141
1142 features
1142 features
1143 ``set`` of bytestrings defining features/capabilities of this
1143 ``set`` of bytestrings defining features/capabilities of this
1144 instance.
1144 instance.
1145
1145
1146 intents
1146 intents
1147 ``set`` of system strings indicating what this repo will be used
1147 ``set`` of system strings indicating what this repo will be used
1148 for.
1148 for.
1149 """
1149 """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
                elif repo._currentlock(repo._wlockref) is None:
                    # rest of vfs files are covered by 'wlock'
                    #
                    # exclude special files
                    for prefix in self._wlockfreeprefix:
                        if path.startswith(prefix):
                            return
                    repo.ui.develwarn(
                        b'write with no wlock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            return ret

        return checkvfs

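    # For orientation only (the exact output format may differ): with
    # ``devel.check-locks`` enabled, an unlocked write through self.vfs to a
    # guarded prefix is reported roughly as
    #
    #   devel-warn: write with no lock: "cache/branch2-served" at: <frame>
    #
    # which is what the b'cache/' entry added above now triggers for caches.
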
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

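    # A hedged trace of the prefix walk above, using an invented path: for
    # subpath b'sub/dir/f', util.splitpath() gives [b'sub', b'dir', b'f'],
    # so the prefixes checked against ctx.substate are, in order,
    #
    #   b'sub/dir/f', then b'sub/dir', then b'sub'
    #
    # and the first hit either answers directly or delegates to the subrepo.
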
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

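    # Usage sketch, assuming an open ``repo``:
    #
    #   repo.filtered(b'visible')  # hide hidden changesets
    #   repo.filtered(b'served')   # additionally hide secret changesets
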
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race-free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

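    # Hedged sketch of the composition in narrowmatch() above, reusing the
    # same match helpers the method calls (``usermatch`` is invented):
    #
    #   em = matchmod.exact([b'path/outside/narrow'])
    #   nm = matchmod.unionmatcher([self._narrowmatch, em])
    #   m = matchmod.intersectmatchers(usermatch, nm)
    #
    # i.e. the user's matcher is narrowed, except for its exact paths.
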
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping
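
    # For orientation (all values invented): with the working copy parent at
    # revision 42, the resulting mapping looks roughly like
    #
    #   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #    42: (42, p1node), p1node: (42, p1node), b'.': (42, p1node)}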

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

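    # Hedged usage sketch of the lookup protocol above (``repo`` is an open
    # repository; ``binnode``/``hexnode`` are placeholder nodeids):
    #
    #   repo[None]     # workingctx for the working directory
    #   repo[b'.']     # first parent of the working directory
    #   repo[b'tip']   # tip changeset
    #   repo[0]        # integer revision number
    #   repo[binnode]  # 20-byte binary nodeid
    #   repo[hexnode]  # 40-byte hex nodeid
    #   repo[0:3]      # list of changectx, filtered revisions skipped
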
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)
1716
1721
1717 def revs(self, expr, *args):
1722 def revs(self, expr, *args):
1718 '''Find revisions matching a revset.
1723 '''Find revisions matching a revset.
1719
1724
1720 The revset is specified as a string ``expr`` that may contain
1725 The revset is specified as a string ``expr`` that may contain
1721 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1726 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1722
1727
1723 Revset aliases from the configuration are not expanded. To expand
1728 Revset aliases from the configuration are not expanded. To expand
1724 user aliases, consider calling ``scmutil.revrange()`` or
1729 user aliases, consider calling ``scmutil.revrange()`` or
1725 ``repo.anyrevs([expr], user=True)``.
1730 ``repo.anyrevs([expr], user=True)``.
1726
1731
1727 Returns a smartset.abstractsmartset, which is a list-like interface
1732 Returns a smartset.abstractsmartset, which is a list-like interface
1728 that contains integer revisions.
1733 that contains integer revisions.
1729 '''
1734 '''
1730 tree = revsetlang.spectree(expr, *args)
1735 tree = revsetlang.spectree(expr, *args)
1731 return revset.makematcher(tree)(self)
1736 return revset.makematcher(tree)(self)
1732
1737
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

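    # Example (sketch): because ``set()`` is a generator, callers can stop
    # early without materializing the whole revset; the predicate below is
    # illustrative only.
    #
    #     for ctx in repo.set(b'draft() and user(%s)', someuser):
    #         if interesting(ctx):
    #             break
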
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

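    # Example (sketch): combining several user-supplied revsets while
    # overriding one alias locally; the specs and alias are illustrative only.
    #
    #     revs = repo.anyrevs(
    #         [b'heads(mybranch())', b'.'],
    #         user=True,
    #         localalias={b'mybranch': b'branch(.)'},
    #     )
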
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

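    # Example (sketch): an extension firing its own hook point; the hook name
    # and argument are illustrative only. Keyword arguments are exposed to
    # shell hooks as HG_* environment variables.
    #
    #     repo.hook(b'myext-preupdate', throw=True, target=hex(node))
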
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

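    # Example (sketch): ``tags()`` maps tag names to binary nodes, so callers
    # usually go through ``hex()`` or a changectx for display; the tag name
    # below is illustrative only.
    #
    #     node = repo.tags().get(b'1.0')
    #     if node is not None:
    #         ui.write(b'%s\n' % hex(node))
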
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

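    # Example (sketch): with ``ignoremissing=True`` a missing branch yields
    # None instead of raising, which suits namespace-style lookups; the
    # branch name is illustrative only.
    #
    #     tip = repo.branchtip(b'maybe-missing', ignoremissing=True)
    #     if tip is None:
    #         ...  # fall back to some other lookup
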
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

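    # Example (sketch): ``known()`` answers node membership in bulk, treating
    # filtered (e.g. obsolete-hidden) revisions as unknown; the nodes are
    # illustrative only.
    #
    #     flags = repo.known([node1, node2])  # e.g. [True, False]
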
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

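    # Example (sketch): the [encode]/[decode] hgrc sections that feed
    # ``_loadfilter``; the patterns and commands are illustrative only.
    #
    #     [encode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #     [decode]
    #     **.txt = tempfile: dos2unix -n INFILE OUTFILE
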
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

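    # Example (sketch): ``flags`` uses b'l' for symlinks and b'x' for
    # executables; an empty flags string writes a plain file. The filenames
    # and contents are illustrative only.
    #
    #     repo.wwrite(b'hello.txt', b'hello\n', b'')    # regular file
    #     repo.wwrite(b'run.sh', b'#!/bin/sh\n', b'x')  # executable
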
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

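    # Example (sketch): the usual calling convention around ``transaction()``;
    # it requires the store lock and nests if a transaction is already
    # running. The description bytes are illustrative only.
    #
    #     with repo.wlock(), repo.lock(), repo.transaction(b'my-change') as tr:
    #         ...  # mutate the store; the transaction aborts on exception
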
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature so we disable it by default. The flag will be
        # removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

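    # Example (sketch): ``undoname`` (defined elsewhere in this module) maps
    # each journal file to its post-transaction undo twin, e.g.
    # ``journal.dirstate`` -> ``undo.dirstate``; ``hg rollback`` and
    # ``hg recover`` consume these files.
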
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

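    # Example (sketch): the command-line entry points that reach this code;
    # behavior notes only, the flags map onto the parameters above.
    #
    #     $ hg rollback --dry-run   # report only, via dryrun=True
    #     $ hg rollback --force     # skip the last-commit safety check
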
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

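    # Example (sketch): an extension augmenting the cache updater by wrapping
    # this method; ``warm_extra_cache`` is an illustrative name only.
    #
    #     def _buildcacheupdater(orig, self, newtransaction):
    #         updater = orig(self, newtransaction)
    #         def wrapped(tr):
    #             updater(tr)
    #             warm_extra_cache(self, tr)
    #         return wrapped
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_buildcacheupdater', _buildcacheupdater
    #     )
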
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

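    # Example (sketch): ``hg debugupdatecaches`` warms everything through the
    # ``full=True`` path above; programmatically, under the store lock:
    #
    #     with repo.lock():
    #         repo.updatecaches(full=True)
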
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
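    # A hypothetical callback sketch: `_afterlock` callbacks receive a single
    # success boolean, as `commithook(unused_success)` in `commit()` below
    # illustrates.
    #
    #     def notify(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)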
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l
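    # Lock-ordering sketch (assuming an existing `repo` object): both locks
    # are context managers, and wlock must be taken first, mirroring the
    # pattern used by `commit()` below.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to modify both .hg and .hg/store here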
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
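    # A minimal commit sketch (hypothetical values, assuming a dirty working
    # directory):
    #
    #     node = repo.commit(
    #         text=b'fix parsing bug',
    #         user=b'Jane Doe <jane@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')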
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )
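    # Usage sketch: with no arguments this compares the working directory
    # against its first parent; the result exposes the lists consumed by
    # `checkcommitpatterns()` above.
    #
    #     st = repo.status()
    #     changed = st.modified + st.added + st.removed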
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
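    # A hypothetical post-dirstate-status callback, following the
    # callback(wctx, status) contract documented above:
    #
    #     def fixup(wctx, status):
    #         # re-read the dirstate through wctx, never from a cached copy
    #         dirstate = wctx.repo().dirstate
    #         ...
    #
    #     repo.addpostdsstatus(fixup)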
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
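    # Usage sketch (assuming a repository with a 'default' branch):
    #
    #     heads = repo.branchheads(b'default', closed=True)
    #
    # returns head nodes newest-first, including closed heads.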
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
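    # Worked example: for a linear history top=r10 .. bottom=r0, the inner
    # loop collects the nodes whose distance from `top` is a power of two,
    # i.e. r9, r8, r6, r2 (distances 1, 2, 4, 8) -- an exponentially spaced
    # sample of the chain between the two endpoints.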
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
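    # Usage sketch (hypothetical values): bookmarks are one of the standard
    # pushkey namespaces, so moving a bookmark over the wire boils down to
    #
    #     ok = repo.pushkey(b'bookmarks', b'mybook', oldnode, newnode)
    #
    # where `ok` is False if a prepushkey hook aborted the operation.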
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
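# Worked example: undoname maps a journal file to its undo counterpart by
# replacing the first 'journal' in the basename, e.g.
#
#     undoname(b'.hg/store/journal.phaseroots')
#     -> b'.hg/store/undo.phaseroots'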
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    return requirements
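# A sketch of the extension wrapping mentioned in the docstring above
# (hypothetical extension code, not part of this module):
#
#     from mercurial import extensions, localrepo
#
#     def _myrequirements(orig, ui, createopts):
#         requirements = orig(ui, createopts)
#         requirements.add(b'exp-myextension')  # hypothetical requirement
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _myrequirements
#         )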
3331
3336
3332 def checkrequirementscompat(ui, requirements):
3337 def checkrequirementscompat(ui, requirements):
3333 """ Checks compatibility of repository requirements enabled and disabled.
3338 """ Checks compatibility of repository requirements enabled and disabled.
3334
3339
3335 Returns a set of requirements which needs to be dropped because dependend
3340 Returns a set of requirements which needs to be dropped because dependend
3336 requirements are not enabled. Also warns users about it """
3341 requirements are not enabled. Also warns users about it """
3337
3342
3338 dropped = set()
3343 dropped = set()
3339
3344
3340 if b'store' not in requirements:
3345 if b'store' not in requirements:
3341 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3346 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3342 ui.warn(
3347 ui.warn(
3343 _(
3348 _(
3344 b'ignoring enabled \'format.bookmarks-in-store\' config '
3349 b'ignoring enabled \'format.bookmarks-in-store\' config '
3345 b'beacuse it is incompatible with disabled '
3350 b'beacuse it is incompatible with disabled '
3346 b'\'format.usestore\' config\n'
3351 b'\'format.usestore\' config\n'
3347 )
3352 )
3348 )
3353 )
3349 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3354 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3350
3355
3351 if (
3356 if (
3352 requirementsmod.SHARED_REQUIREMENT in requirements
3357 requirementsmod.SHARED_REQUIREMENT in requirements
3353 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3358 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3354 ):
3359 ):
3355 raise error.Abort(
3360 raise error.Abort(
3356 _(
3361 _(
3357 b"cannot create shared repository as source was created"
3362 b"cannot create shared repository as source was created"
3358 b" with 'format.usestore' config disabled"
3363 b" with 'format.usestore' config disabled"
3359 )
3364 )
3360 )
3365 )
3361
3366
3362 return dropped
3367 return dropped
3363
3368
3364
3369
3365 def filterknowncreateopts(ui, createopts):
3370 def filterknowncreateopts(ui, createopts):
3366 """Filters a dict of repo creation options against options that are known.
3371 """Filters a dict of repo creation options against options that are known.
3367
3372
3368 Receives a dict of repo creation options and returns a dict of those
3373 Receives a dict of repo creation options and returns a dict of those
3369 options that we don't know how to handle.
3374 options that we don't know how to handle.
3370
3375
3371 This function is called as part of repository creation. If the
3376 This function is called as part of repository creation. If the
3372 returned dict contains any items, repository creation will not
3377 returned dict contains any items, repository creation will not
3373 be allowed, as it means there was a request to create a repository
3378 be allowed, as it means there was a request to create a repository
3374 with options not recognized by loaded code.
3379 with options not recognized by loaded code.
3375
3380
3376 Extensions can wrap this function to filter out creation options
3381 Extensions can wrap this function to filter out creation options
3377 they know how to handle.
3382 they know how to handle.
3378 """
3383 """
3379 known = {
3384 known = {
3380 b'backend',
3385 b'backend',
3381 b'lfs',
3386 b'lfs',
3382 b'narrowfiles',
3387 b'narrowfiles',
3383 b'sharedrepo',
3388 b'sharedrepo',
3384 b'sharedrelative',
3389 b'sharedrelative',
3385 b'shareditems',
3390 b'shareditems',
3386 b'shallowfilestore',
3391 b'shallowfilestore',
3387 }
3392 }
3388
3393
3389 return {k: v for k, v in createopts.items() if k not in known}
3394 return {k: v for k, v in createopts.items() if k not in known}
3390
3395
3391
3396
3392 def createrepository(ui, path, createopts=None):
3397 def createrepository(ui, path, createopts=None):
3393 """Create a new repository in a vfs.
3398 """Create a new repository in a vfs.
3394
3399
3395 ``path`` path to the new repo's working directory.
3400 ``path`` path to the new repo's working directory.
3396 ``createopts`` options for the new repository.
3401 ``createopts`` options for the new repository.
3397
3402
3398 The following keys for ``createopts`` are recognized:
3403 The following keys for ``createopts`` are recognized:
3399
3404
3400 backend
3405 backend
3401 The storage backend to use.
3406 The storage backend to use.
3402 lfs
3407 lfs
3403 Repository will be created with ``lfs`` requirement. The lfs extension
3408 Repository will be created with ``lfs`` requirement. The lfs extension
3404 will automatically be loaded when the repository is accessed.
3409 will automatically be loaded when the repository is accessed.
3405 narrowfiles
3410 narrowfiles
3406 Set up repository to support narrow file storage.
3411 Set up repository to support narrow file storage.
3407 sharedrepo
3412 sharedrepo
3408 Repository object from which storage should be shared.
3413 Repository object from which storage should be shared.
3409 sharedrelative
3414 sharedrelative
3410 Boolean indicating if the path to the shared repo should be
3415 Boolean indicating if the path to the shared repo should be
3411 stored as relative. By default, the pointer to the "parent" repo
3416 stored as relative. By default, the pointer to the "parent" repo
3412 is stored as an absolute path.
3417 is stored as an absolute path.
3413 shareditems
3418 shareditems
3414 Set of items to share to the new repository (in addition to storage).
3419 Set of items to share to the new repository (in addition to storage).
3415 shallowfilestore
3420 shallowfilestore
3416 Indicates that storage for files should be shallow (not all ancestor
3421 Indicates that storage for files should be shallow (not all ancestor
3417 revisions are known).
3422 revisions are known).
3418 """
3423 """
3419 createopts = defaultcreateopts(ui, createopts=createopts)
3424 createopts = defaultcreateopts(ui, createopts=createopts)
3420
3425
3421 unknownopts = filterknowncreateopts(ui, createopts)
3426 unknownopts = filterknowncreateopts(ui, createopts)
3422
3427
3423 if not isinstance(unknownopts, dict):
3428 if not isinstance(unknownopts, dict):
3424 raise error.ProgrammingError(
3429 raise error.ProgrammingError(
3425 b'filterknowncreateopts() did not return a dict'
3430 b'filterknowncreateopts() did not return a dict'
3426 )
3431 )
3427
3432
3428 if unknownopts:
3433 if unknownopts:
3429 raise error.Abort(
3434 raise error.Abort(
3430 _(
3435 _(
3431 b'unable to create repository because of unknown '
3436 b'unable to create repository because of unknown '
3432 b'creation option: %s'
3437 b'creation option: %s'
3433 )
3438 )
3434 % b', '.join(sorted(unknownopts)),
3439 % b', '.join(sorted(unknownopts)),
3435 hint=_(b'is a required extension not loaded?'),
3440 hint=_(b'is a required extension not loaded?'),
3436 )
3441 )
3437
3442
3438 requirements = newreporequirements(ui, createopts=createopts)
3443 requirements = newreporequirements(ui, createopts=createopts)
3439 requirements -= checkrequirementscompat(ui, requirements)
3444 requirements -= checkrequirementscompat(ui, requirements)
3440
3445
3441 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3446 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3442
3447
3443 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3448 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3444 if hgvfs.exists():
3449 if hgvfs.exists():
3445 raise error.RepoError(_(b'repository %s already exists') % path)
3450 raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )
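            # For example (illustrative paths), with the new share at
            # /srv/child and its source at /srv/parent:
            #
            #   os.path.relpath(b'/srv/parent/.hg', b'/srv/child/.hg')
            #   == b'../../parent/.hg'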

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')
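    # Note: 'cache' holds caches derived from store contents, while 'wcache'
    # holds caches tied to the working copy; shared repositories skip
    # creating them here and defer to the share machinery.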

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )
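        # The four header bytes parse as a big-endian 32-bit integer whose
        # low 16 bits carry the revlog version, so b'\0\0\0\2' decodes to
        # version 2 with no flags set:
        #
        #   >>> import struct
        #   >>> v = struct.unpack('>I', b'\0\0\0\2')[0]
        #   >>> (v & 0xFFFF, v >> 16)
        #   (2, 0)
        #
        # Clients that only know revlog versions 0 and 1 refuse to read it.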

    scmutil.writerequires(hgvfs, requirements)
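    # writerequires() renders this set as .hg/requires: a plain text file
    # with one requirement per line (sorted), which clients consult before
    # touching anything else in the repository.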

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
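    # .hg/shared likewise lists one shared component per line; for example,
    # a share created with bookmark sharing enabled would contain just
    # 'bookmarks'.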


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
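
# A short behavioral sketch (derived from the class above):
#
#   poisonrepository(repo)
#   repo.close()      # still permitted; a no-op on the poisoned instance
#   repo.changelog    # raises error.ProgrammingError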