localrepo: refactor `.hg/requires` reading logic in separate function...
Pulkit Goyal
r45913:c4fe2262 default
@@ -1,3521 +1,3530 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import random
12 import random
13 import sys
13 import sys
14 import time
14 import time
15 import weakref
15 import weakref
16
16
17 from .i18n import _
17 from .i18n import _
18 from .node import (
18 from .node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 )
24 )
25 from .pycompat import (
25 from .pycompat import (
26 delattr,
26 delattr,
27 getattr,
27 getattr,
28 )
28 )
29 from . import (
29 from . import (
30 bookmarks,
30 bookmarks,
31 branchmap,
31 branchmap,
32 bundle2,
32 bundle2,
33 changegroup,
33 changegroup,
34 color,
34 color,
35 commit,
35 commit,
36 context,
36 context,
37 dirstate,
37 dirstate,
38 dirstateguard,
38 dirstateguard,
39 discovery,
39 discovery,
40 encoding,
40 encoding,
41 error,
41 error,
42 exchange,
42 exchange,
43 extensions,
43 extensions,
44 filelog,
44 filelog,
45 hook,
45 hook,
46 lock as lockmod,
46 lock as lockmod,
47 match as matchmod,
47 match as matchmod,
48 mergestate as mergestatemod,
48 mergestate as mergestatemod,
49 mergeutil,
49 mergeutil,
50 namespaces,
50 namespaces,
51 narrowspec,
51 narrowspec,
52 obsolete,
52 obsolete,
53 pathutil,
53 pathutil,
54 phases,
54 phases,
55 pushkey,
55 pushkey,
56 pycompat,
56 pycompat,
57 rcutil,
57 rcutil,
58 repoview,
58 repoview,
59 revset,
59 revset,
60 revsetlang,
60 revsetlang,
61 scmutil,
61 scmutil,
62 sparse,
62 sparse,
63 store as storemod,
63 store as storemod,
64 subrepoutil,
64 subrepoutil,
65 tags as tagsmod,
65 tags as tagsmod,
66 transaction,
66 transaction,
67 txnutil,
67 txnutil,
68 util,
68 util,
69 vfs as vfsmod,
69 vfs as vfsmod,
70 )
70 )
71
71
72 from .interfaces import (
72 from .interfaces import (
73 repository,
73 repository,
74 util as interfaceutil,
74 util as interfaceutil,
75 )
75 )
76
76
77 from .utils import (
77 from .utils import (
78 hashutil,
78 hashutil,
79 procutil,
79 procutil,
80 stringutil,
80 stringutil,
81 )
81 )
82
82
83 from .revlogutils import constants as revlogconst
83 from .revlogutils import constants as revlogconst
84
84
85 release = lockmod.release
85 release = lockmod.release
86 urlerr = util.urlerr
86 urlerr = util.urlerr
87 urlreq = util.urlreq
87 urlreq = util.urlreq
88
88
89 # set of (path, vfs-location) tuples. vfs-location is:
89 # set of (path, vfs-location) tuples. vfs-location is:
90 # - 'plain for vfs relative paths
90 # - 'plain for vfs relative paths
91 # - '' for svfs relative paths
91 # - '' for svfs relative paths
92 _cachedfiles = set()
92 _cachedfiles = set()
93
93
94
94
95 class _basefilecache(scmutil.filecache):
95 class _basefilecache(scmutil.filecache):
96 """All filecache usage on repo are done for logic that should be unfiltered
96 """All filecache usage on repo are done for logic that should be unfiltered
97 """
97 """
98
98
99 def __get__(self, repo, type=None):
99 def __get__(self, repo, type=None):
100 if repo is None:
100 if repo is None:
101 return self
101 return self
102 # proxy to unfiltered __dict__ since filtered repo has no entry
102 # proxy to unfiltered __dict__ since filtered repo has no entry
103 unfi = repo.unfiltered()
103 unfi = repo.unfiltered()
104 try:
104 try:
105 return unfi.__dict__[self.sname]
105 return unfi.__dict__[self.sname]
106 except KeyError:
106 except KeyError:
107 pass
107 pass
108 return super(_basefilecache, self).__get__(unfi, type)
108 return super(_basefilecache, self).__get__(unfi, type)
109
109
110 def set(self, repo, value):
110 def set(self, repo, value):
111 return super(_basefilecache, self).set(repo.unfiltered(), value)
111 return super(_basefilecache, self).set(repo.unfiltered(), value)
112
112
113
113
114 class repofilecache(_basefilecache):
114 class repofilecache(_basefilecache):
115 """filecache for files in .hg but outside of .hg/store"""
115 """filecache for files in .hg but outside of .hg/store"""
116
116
117 def __init__(self, *paths):
117 def __init__(self, *paths):
118 super(repofilecache, self).__init__(*paths)
118 super(repofilecache, self).__init__(*paths)
119 for path in paths:
119 for path in paths:
120 _cachedfiles.add((path, b'plain'))
120 _cachedfiles.add((path, b'plain'))
121
121
122 def join(self, obj, fname):
122 def join(self, obj, fname):
123 return obj.vfs.join(fname)
123 return obj.vfs.join(fname)
124
124
125
125
126 class storecache(_basefilecache):
126 class storecache(_basefilecache):
127 """filecache for files in the store"""
127 """filecache for files in the store"""
128
128
129 def __init__(self, *paths):
129 def __init__(self, *paths):
130 super(storecache, self).__init__(*paths)
130 super(storecache, self).__init__(*paths)
131 for path in paths:
131 for path in paths:
132 _cachedfiles.add((path, b''))
132 _cachedfiles.add((path, b''))
133
133
134 def join(self, obj, fname):
134 def join(self, obj, fname):
135 return obj.sjoin(fname)
135 return obj.sjoin(fname)
136
136
137
137
138 class mixedrepostorecache(_basefilecache):
138 class mixedrepostorecache(_basefilecache):
139 """filecache for a mix files in .hg/store and outside"""
139 """filecache for a mix files in .hg/store and outside"""
140
140
141 def __init__(self, *pathsandlocations):
141 def __init__(self, *pathsandlocations):
142 # scmutil.filecache only uses the path for passing back into our
142 # scmutil.filecache only uses the path for passing back into our
143 # join(), so we can safely pass a list of paths and locations
143 # join(), so we can safely pass a list of paths and locations
144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
144 super(mixedrepostorecache, self).__init__(*pathsandlocations)
145 _cachedfiles.update(pathsandlocations)
145 _cachedfiles.update(pathsandlocations)
146
146
147 def join(self, obj, fnameandlocation):
147 def join(self, obj, fnameandlocation):
148 fname, location = fnameandlocation
148 fname, location = fnameandlocation
149 if location == b'plain':
149 if location == b'plain':
150 return obj.vfs.join(fname)
150 return obj.vfs.join(fname)
151 else:
151 else:
152 if location != b'':
152 if location != b'':
153 raise error.ProgrammingError(
153 raise error.ProgrammingError(
154 b'unexpected location: %s' % location
154 b'unexpected location: %s' % location
155 )
155 )
156 return obj.sjoin(fname)
156 return obj.sjoin(fname)
157
157
158
158
159 def isfilecached(repo, name):
159 def isfilecached(repo, name):
160 """check if a repo has already cached "name" filecache-ed property
160 """check if a repo has already cached "name" filecache-ed property
161
161
162 This returns (cachedobj-or-None, iscached) tuple.
162 This returns (cachedobj-or-None, iscached) tuple.
163 """
163 """
164 cacheentry = repo.unfiltered()._filecache.get(name, None)
164 cacheentry = repo.unfiltered()._filecache.get(name, None)
165 if not cacheentry:
165 if not cacheentry:
166 return None, False
166 return None, False
167 return cacheentry.obj, True
167 return cacheentry.obj, True
168
168
169
169
170 class unfilteredpropertycache(util.propertycache):
170 class unfilteredpropertycache(util.propertycache):
171 """propertycache that apply to unfiltered repo only"""
171 """propertycache that apply to unfiltered repo only"""
172
172
173 def __get__(self, repo, type=None):
173 def __get__(self, repo, type=None):
174 unfi = repo.unfiltered()
174 unfi = repo.unfiltered()
175 if unfi is repo:
175 if unfi is repo:
176 return super(unfilteredpropertycache, self).__get__(unfi)
176 return super(unfilteredpropertycache, self).__get__(unfi)
177 return getattr(unfi, self.name)
177 return getattr(unfi, self.name)
178
178
179
179
180 class filteredpropertycache(util.propertycache):
180 class filteredpropertycache(util.propertycache):
181 """propertycache that must take filtering in account"""
181 """propertycache that must take filtering in account"""
182
182
183 def cachevalue(self, obj, value):
183 def cachevalue(self, obj, value):
184 object.__setattr__(obj, self.name, value)
184 object.__setattr__(obj, self.name, value)
185
185
186
186
187 def hasunfilteredcache(repo, name):
187 def hasunfilteredcache(repo, name):
188 """check if a repo has an unfilteredpropertycache value for <name>"""
188 """check if a repo has an unfilteredpropertycache value for <name>"""
189 return name in vars(repo.unfiltered())
189 return name in vars(repo.unfiltered())
190
190
191
191
192 def unfilteredmethod(orig):
192 def unfilteredmethod(orig):
193 """decorate method that always need to be run on unfiltered version"""
193 """decorate method that always need to be run on unfiltered version"""
194
194
195 def wrapper(repo, *args, **kwargs):
195 def wrapper(repo, *args, **kwargs):
196 return orig(repo.unfiltered(), *args, **kwargs)
196 return orig(repo.unfiltered(), *args, **kwargs)
197
197
198 return wrapper
198 return wrapper
199
199
200
200
201 moderncaps = {
201 moderncaps = {
202 b'lookup',
202 b'lookup',
203 b'branchmap',
203 b'branchmap',
204 b'pushkey',
204 b'pushkey',
205 b'known',
205 b'known',
206 b'getbundle',
206 b'getbundle',
207 b'unbundle',
207 b'unbundle',
208 }
208 }
209 legacycaps = moderncaps.union({b'changegroupsubset'})
209 legacycaps = moderncaps.union({b'changegroupsubset'})
210
210
211
211
212 @interfaceutil.implementer(repository.ipeercommandexecutor)
212 @interfaceutil.implementer(repository.ipeercommandexecutor)
213 class localcommandexecutor(object):
213 class localcommandexecutor(object):
214 def __init__(self, peer):
214 def __init__(self, peer):
215 self._peer = peer
215 self._peer = peer
216 self._sent = False
216 self._sent = False
217 self._closed = False
217 self._closed = False
218
218
219 def __enter__(self):
219 def __enter__(self):
220 return self
220 return self
221
221
222 def __exit__(self, exctype, excvalue, exctb):
222 def __exit__(self, exctype, excvalue, exctb):
223 self.close()
223 self.close()
224
224
225 def callcommand(self, command, args):
225 def callcommand(self, command, args):
226 if self._sent:
226 if self._sent:
227 raise error.ProgrammingError(
227 raise error.ProgrammingError(
228 b'callcommand() cannot be used after sendcommands()'
228 b'callcommand() cannot be used after sendcommands()'
229 )
229 )
230
230
231 if self._closed:
231 if self._closed:
232 raise error.ProgrammingError(
232 raise error.ProgrammingError(
233 b'callcommand() cannot be used after close()'
233 b'callcommand() cannot be used after close()'
234 )
234 )
235
235
236 # We don't need to support anything fancy. Just call the named
236 # We don't need to support anything fancy. Just call the named
237 # method on the peer and return a resolved future.
237 # method on the peer and return a resolved future.
238 fn = getattr(self._peer, pycompat.sysstr(command))
238 fn = getattr(self._peer, pycompat.sysstr(command))
239
239
240 f = pycompat.futures.Future()
240 f = pycompat.futures.Future()
241
241
242 try:
242 try:
243 result = fn(**pycompat.strkwargs(args))
243 result = fn(**pycompat.strkwargs(args))
244 except Exception:
244 except Exception:
245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
245 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
246 else:
246 else:
247 f.set_result(result)
247 f.set_result(result)
248
248
249 return f
249 return f
250
250
251 def sendcommands(self):
251 def sendcommands(self):
252 self._sent = True
252 self._sent = True
253
253
254 def close(self):
254 def close(self):
255 self._closed = True
255 self._closed = True
256
256
257
257
258 @interfaceutil.implementer(repository.ipeercommands)
258 @interfaceutil.implementer(repository.ipeercommands)
259 class localpeer(repository.peer):
259 class localpeer(repository.peer):
260 '''peer for a local repo; reflects only the most recent API'''
260 '''peer for a local repo; reflects only the most recent API'''
261
261
262 def __init__(self, repo, caps=None):
262 def __init__(self, repo, caps=None):
263 super(localpeer, self).__init__()
263 super(localpeer, self).__init__()
264
264
265 if caps is None:
265 if caps is None:
266 caps = moderncaps.copy()
266 caps = moderncaps.copy()
267 self._repo = repo.filtered(b'served')
267 self._repo = repo.filtered(b'served')
268 self.ui = repo.ui
268 self.ui = repo.ui
269 self._caps = repo._restrictcapabilities(caps)
269 self._caps = repo._restrictcapabilities(caps)
270
270
271 # Begin of _basepeer interface.
271 # Begin of _basepeer interface.
272
272
273 def url(self):
273 def url(self):
274 return self._repo.url()
274 return self._repo.url()
275
275
276 def local(self):
276 def local(self):
277 return self._repo
277 return self._repo
278
278
279 def peer(self):
279 def peer(self):
280 return self
280 return self
281
281
282 def canpush(self):
282 def canpush(self):
283 return True
283 return True
284
284
285 def close(self):
285 def close(self):
286 self._repo.close()
286 self._repo.close()
287
287
288 # End of _basepeer interface.
288 # End of _basepeer interface.
289
289
290 # Begin of _basewirecommands interface.
290 # Begin of _basewirecommands interface.
291
291
292 def branchmap(self):
292 def branchmap(self):
293 return self._repo.branchmap()
293 return self._repo.branchmap()
294
294
295 def capabilities(self):
295 def capabilities(self):
296 return self._caps
296 return self._caps
297
297
298 def clonebundles(self):
298 def clonebundles(self):
299 return self._repo.tryread(b'clonebundles.manifest')
299 return self._repo.tryread(b'clonebundles.manifest')
300
300
301 def debugwireargs(self, one, two, three=None, four=None, five=None):
301 def debugwireargs(self, one, two, three=None, four=None, five=None):
302 """Used to test argument passing over the wire"""
302 """Used to test argument passing over the wire"""
303 return b"%s %s %s %s %s" % (
303 return b"%s %s %s %s %s" % (
304 one,
304 one,
305 two,
305 two,
306 pycompat.bytestr(three),
306 pycompat.bytestr(three),
307 pycompat.bytestr(four),
307 pycompat.bytestr(four),
308 pycompat.bytestr(five),
308 pycompat.bytestr(five),
309 )
309 )
310
310
311 def getbundle(
311 def getbundle(
312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
312 self, source, heads=None, common=None, bundlecaps=None, **kwargs
313 ):
313 ):
314 chunks = exchange.getbundlechunks(
314 chunks = exchange.getbundlechunks(
315 self._repo,
315 self._repo,
316 source,
316 source,
317 heads=heads,
317 heads=heads,
318 common=common,
318 common=common,
319 bundlecaps=bundlecaps,
319 bundlecaps=bundlecaps,
320 **kwargs
320 **kwargs
321 )[1]
321 )[1]
322 cb = util.chunkbuffer(chunks)
322 cb = util.chunkbuffer(chunks)
323
323
324 if exchange.bundle2requested(bundlecaps):
324 if exchange.bundle2requested(bundlecaps):
325 # When requesting a bundle2, getbundle returns a stream to make the
325 # When requesting a bundle2, getbundle returns a stream to make the
326 # wire level function happier. We need to build a proper object
326 # wire level function happier. We need to build a proper object
327 # from it in local peer.
327 # from it in local peer.
328 return bundle2.getunbundler(self.ui, cb)
328 return bundle2.getunbundler(self.ui, cb)
329 else:
329 else:
330 return changegroup.getunbundler(b'01', cb, None)
330 return changegroup.getunbundler(b'01', cb, None)
331
331
332 def heads(self):
332 def heads(self):
333 return self._repo.heads()
333 return self._repo.heads()
334
334
335 def known(self, nodes):
335 def known(self, nodes):
336 return self._repo.known(nodes)
336 return self._repo.known(nodes)
337
337
338 def listkeys(self, namespace):
338 def listkeys(self, namespace):
339 return self._repo.listkeys(namespace)
339 return self._repo.listkeys(namespace)
340
340
341 def lookup(self, key):
341 def lookup(self, key):
342 return self._repo.lookup(key)
342 return self._repo.lookup(key)
343
343
344 def pushkey(self, namespace, key, old, new):
344 def pushkey(self, namespace, key, old, new):
345 return self._repo.pushkey(namespace, key, old, new)
345 return self._repo.pushkey(namespace, key, old, new)
346
346
347 def stream_out(self):
347 def stream_out(self):
348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
348 raise error.Abort(_(b'cannot perform stream clone against local peer'))
349
349
350 def unbundle(self, bundle, heads, url):
350 def unbundle(self, bundle, heads, url):
351 """apply a bundle on a repo
351 """apply a bundle on a repo
352
352
353 This function handles the repo locking itself."""
353 This function handles the repo locking itself."""
354 try:
354 try:
355 try:
355 try:
356 bundle = exchange.readbundle(self.ui, bundle, None)
356 bundle = exchange.readbundle(self.ui, bundle, None)
357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
357 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
358 if util.safehasattr(ret, b'getchunks'):
358 if util.safehasattr(ret, b'getchunks'):
359 # This is a bundle20 object, turn it into an unbundler.
359 # This is a bundle20 object, turn it into an unbundler.
360 # This little dance should be dropped eventually when the
360 # This little dance should be dropped eventually when the
361 # API is finally improved.
361 # API is finally improved.
362 stream = util.chunkbuffer(ret.getchunks())
362 stream = util.chunkbuffer(ret.getchunks())
363 ret = bundle2.getunbundler(self.ui, stream)
363 ret = bundle2.getunbundler(self.ui, stream)
364 return ret
364 return ret
365 except Exception as exc:
365 except Exception as exc:
366 # If the exception contains output salvaged from a bundle2
366 # If the exception contains output salvaged from a bundle2
367 # reply, we need to make sure it is printed before continuing
367 # reply, we need to make sure it is printed before continuing
368 # to fail. So we build a bundle2 with such output and consume
368 # to fail. So we build a bundle2 with such output and consume
369 # it directly.
369 # it directly.
370 #
370 #
371 # This is not very elegant but allows a "simple" solution for
371 # This is not very elegant but allows a "simple" solution for
372 # issue4594
372 # issue4594
373 output = getattr(exc, '_bundle2salvagedoutput', ())
373 output = getattr(exc, '_bundle2salvagedoutput', ())
374 if output:
374 if output:
375 bundler = bundle2.bundle20(self._repo.ui)
375 bundler = bundle2.bundle20(self._repo.ui)
376 for out in output:
376 for out in output:
377 bundler.addpart(out)
377 bundler.addpart(out)
378 stream = util.chunkbuffer(bundler.getchunks())
378 stream = util.chunkbuffer(bundler.getchunks())
379 b = bundle2.getunbundler(self.ui, stream)
379 b = bundle2.getunbundler(self.ui, stream)
380 bundle2.processbundle(self._repo, b)
380 bundle2.processbundle(self._repo, b)
381 raise
381 raise
382 except error.PushRaced as exc:
382 except error.PushRaced as exc:
383 raise error.ResponseError(
383 raise error.ResponseError(
384 _(b'push failed:'), stringutil.forcebytestr(exc)
384 _(b'push failed:'), stringutil.forcebytestr(exc)
385 )
385 )
386
386
387 # End of _basewirecommands interface.
387 # End of _basewirecommands interface.
388
388
389 # Begin of peer interface.
389 # Begin of peer interface.
390
390
391 def commandexecutor(self):
391 def commandexecutor(self):
392 return localcommandexecutor(self)
392 return localcommandexecutor(self)
393
393
394 # End of peer interface.
394 # End of peer interface.
395
395
396
396
397 @interfaceutil.implementer(repository.ipeerlegacycommands)
397 @interfaceutil.implementer(repository.ipeerlegacycommands)
398 class locallegacypeer(localpeer):
398 class locallegacypeer(localpeer):
399 '''peer extension which implements legacy methods too; used for tests with
399 '''peer extension which implements legacy methods too; used for tests with
400 restricted capabilities'''
400 restricted capabilities'''
401
401
402 def __init__(self, repo):
402 def __init__(self, repo):
403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
403 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
404
404
405 # Begin of baselegacywirecommands interface.
405 # Begin of baselegacywirecommands interface.
406
406
407 def between(self, pairs):
407 def between(self, pairs):
408 return self._repo.between(pairs)
408 return self._repo.between(pairs)
409
409
410 def branches(self, nodes):
410 def branches(self, nodes):
411 return self._repo.branches(nodes)
411 return self._repo.branches(nodes)
412
412
413 def changegroup(self, nodes, source):
413 def changegroup(self, nodes, source):
414 outgoing = discovery.outgoing(
414 outgoing = discovery.outgoing(
415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
415 self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
416 )
416 )
417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
417 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
418
418
419 def changegroupsubset(self, bases, heads, source):
419 def changegroupsubset(self, bases, heads, source):
420 outgoing = discovery.outgoing(
420 outgoing = discovery.outgoing(
421 self._repo, missingroots=bases, ancestorsof=heads
421 self._repo, missingroots=bases, ancestorsof=heads
422 )
422 )
423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
423 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
424
424
425 # End of baselegacywirecommands interface.
425 # End of baselegacywirecommands interface.
426
426
427
427
428 # Increment the sub-version when the revlog v2 format changes to lock out old
428 # Increment the sub-version when the revlog v2 format changes to lock out old
429 # clients.
429 # clients.
430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
430 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
431
431
432 # A repository with the sparserevlog feature will have delta chains that
432 # A repository with the sparserevlog feature will have delta chains that
433 # can spread over a larger span. Sparse reading cuts these large spans into
433 # can spread over a larger span. Sparse reading cuts these large spans into
434 # pieces, so that each piece isn't too big.
434 # pieces, so that each piece isn't too big.
435 # Without the sparserevlog capability, reading from the repository could use
435 # Without the sparserevlog capability, reading from the repository could use
436 # huge amounts of memory, because the whole span would be read at once,
436 # huge amounts of memory, because the whole span would be read at once,
437 # including all the intermediate revisions that aren't pertinent for the chain.
437 # including all the intermediate revisions that aren't pertinent for the chain.
438 # This is why once a repository has enabled sparse-read, it becomes required.
438 # This is why once a repository has enabled sparse-read, it becomes required.
439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
439 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
440
440
441 # A repository with the sidedataflag requirement will allow to store extra
441 # A repository with the sidedataflag requirement will allow to store extra
442 # information for revision without altering their original hashes.
442 # information for revision without altering their original hashes.
443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
443 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
444
444
445 # A repository with the the copies-sidedata-changeset requirement will store
445 # A repository with the the copies-sidedata-changeset requirement will store
446 # copies related information in changeset's sidedata.
446 # copies related information in changeset's sidedata.
447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
447 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
448
448
449 # The repository use persistent nodemap for the changelog and the manifest.
449 # The repository use persistent nodemap for the changelog and the manifest.
450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
450 NODEMAP_REQUIREMENT = b'persistent-nodemap'
451
451
452 # Functions receiving (ui, features) that extensions can register to impact
452 # Functions receiving (ui, features) that extensions can register to impact
453 # the ability to load repositories with custom requirements. Only
453 # the ability to load repositories with custom requirements. Only
454 # functions defined in loaded extensions are called.
454 # functions defined in loaded extensions are called.
455 #
455 #
456 # The function receives a set of requirement strings that the repository
456 # The function receives a set of requirement strings that the repository
457 # is capable of opening. Functions will typically add elements to the
457 # is capable of opening. Functions will typically add elements to the
458 # set to reflect that the extension knows how to handle that requirements.
458 # set to reflect that the extension knows how to handle that requirements.
459 featuresetupfuncs = set()
459 featuresetupfuncs = set()
460
460
461
461
462 def _getsharedvfs(hgvfs, requirements):
462 def _getsharedvfs(hgvfs, requirements):
463 """ returns the vfs object pointing to root of shared source
463 """ returns the vfs object pointing to root of shared source
464 repo for a shared repository
464 repo for a shared repository
465
465
466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
466 hgvfs is vfs pointing at .hg/ of current repo (shared one)
467 requirements is a set of requirements of current repo (shared one)
467 requirements is a set of requirements of current repo (shared one)
468 """
468 """
469 # The ``shared`` or ``relshared`` requirements indicate the
469 # The ``shared`` or ``relshared`` requirements indicate the
470 # store lives in the path contained in the ``.hg/sharedpath`` file.
470 # store lives in the path contained in the ``.hg/sharedpath`` file.
471 # This is an absolute path for ``shared`` and relative to
471 # This is an absolute path for ``shared`` and relative to
472 # ``.hg/`` for ``relshared``.
472 # ``.hg/`` for ``relshared``.
473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
473 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
474 if b'relshared' in requirements:
474 if b'relshared' in requirements:
475 sharedpath = hgvfs.join(sharedpath)
475 sharedpath = hgvfs.join(sharedpath)
476
476
477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
477 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
478
478
479 if not sharedvfs.exists():
479 if not sharedvfs.exists():
480 raise error.RepoError(
480 raise error.RepoError(
481 _(b'.hg/sharedpath points to nonexistent directory %s')
481 _(b'.hg/sharedpath points to nonexistent directory %s')
482 % sharedvfs.base
482 % sharedvfs.base
483 )
483 )
484 return sharedvfs
484 return sharedvfs
485
485
486
486
487 def _readrequires(vfs, allowmissing):
488 """ reads the require file present at root of this vfs
489 and return a set of requirements
490
491 If allowmissing is True, we suppress ENOENT if raised"""
492 # requires file contains a newline-delimited list of
493 # features/capabilities the opener (us) must have in order to use
494 # the repository. This file was introduced in Mercurial 0.9.2,
495 # which means very old repositories may not have one. We assume
496 # a missing file translates to no requirements.
497 try:
498 requirements = set(vfs.read(b'requires').splitlines())
499 except IOError as e:
500 if not (allowmissing and e.errno == errno.ENOENT):
501 raise
502 requirements = set()
503 return requirements
504
505
487 def makelocalrepository(baseui, path, intents=None):
506 def makelocalrepository(baseui, path, intents=None):
488 """Create a local repository object.
507 """Create a local repository object.
489
508
490 Given arguments needed to construct a local repository, this function
509 Given arguments needed to construct a local repository, this function
491 performs various early repository loading functionality (such as
510 performs various early repository loading functionality (such as
492 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
511 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
493 the repository can be opened, derives a type suitable for representing
512 the repository can be opened, derives a type suitable for representing
494 that repository, and returns an instance of it.
513 that repository, and returns an instance of it.
495
514
496 The returned object conforms to the ``repository.completelocalrepository``
515 The returned object conforms to the ``repository.completelocalrepository``
497 interface.
516 interface.
498
517
499 The repository type is derived by calling a series of factory functions
518 The repository type is derived by calling a series of factory functions
500 for each aspect/interface of the final repository. These are defined by
519 for each aspect/interface of the final repository. These are defined by
501 ``REPO_INTERFACES``.
520 ``REPO_INTERFACES``.
502
521
503 Each factory function is called to produce a type implementing a specific
522 Each factory function is called to produce a type implementing a specific
504 interface. The cumulative list of returned types will be combined into a
523 interface. The cumulative list of returned types will be combined into a
505 new type and that type will be instantiated to represent the local
524 new type and that type will be instantiated to represent the local
506 repository.
525 repository.
507
526
508 The factory functions each receive various state that may be consulted
527 The factory functions each receive various state that may be consulted
509 as part of deriving a type.
528 as part of deriving a type.
510
529
511 Extensions should wrap these factory functions to customize repository type
530 Extensions should wrap these factory functions to customize repository type
512 creation. Note that an extension's wrapped function may be called even if
531 creation. Note that an extension's wrapped function may be called even if
513 that extension is not loaded for the repo being constructed. Extensions
532 that extension is not loaded for the repo being constructed. Extensions
514 should check if their ``__name__`` appears in the
533 should check if their ``__name__`` appears in the
515 ``extensionmodulenames`` set passed to the factory function and no-op if
534 ``extensionmodulenames`` set passed to the factory function and no-op if
516 not.
535 not.
517 """
536 """
518 ui = baseui.copy()
537 ui = baseui.copy()
519 # Prevent copying repo configuration.
538 # Prevent copying repo configuration.
520 ui.copy = baseui.copy
539 ui.copy = baseui.copy
521
540
522 # Working directory VFS rooted at repository root.
541 # Working directory VFS rooted at repository root.
523 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
542 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
524
543
525 # Main VFS for .hg/ directory.
544 # Main VFS for .hg/ directory.
526 hgpath = wdirvfs.join(b'.hg')
545 hgpath = wdirvfs.join(b'.hg')
527 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
546 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
528 # Whether this repository is shared one or not
547 # Whether this repository is shared one or not
529 shared = False
548 shared = False
530 # If this repository is shared, vfs pointing to shared repo
549 # If this repository is shared, vfs pointing to shared repo
531 sharedvfs = None
550 sharedvfs = None
532
551
533 # The .hg/ path should exist and should be a directory. All other
552 # The .hg/ path should exist and should be a directory. All other
534 # cases are errors.
553 # cases are errors.
535 if not hgvfs.isdir():
554 if not hgvfs.isdir():
536 try:
555 try:
537 hgvfs.stat()
556 hgvfs.stat()
538 except OSError as e:
557 except OSError as e:
539 if e.errno != errno.ENOENT:
558 if e.errno != errno.ENOENT:
540 raise
559 raise
541 except ValueError as e:
560 except ValueError as e:
542 # Can be raised on Python 3.8 when path is invalid.
561 # Can be raised on Python 3.8 when path is invalid.
543 raise error.Abort(
562 raise error.Abort(
544 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
563 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
545 )
564 )
546
565
547 raise error.RepoError(_(b'repository %s not found') % path)
566 raise error.RepoError(_(b'repository %s not found') % path)
548
567
549 # .hg/requires file contains a newline-delimited list of
568 requirements = _readrequires(hgvfs, True)
550 # features/capabilities the opener (us) must have in order to use
551 # the repository. This file was introduced in Mercurial 0.9.2,
552 # which means very old repositories may not have one. We assume
553 # a missing file translates to no requirements.
554 try:
555 requirements = set(hgvfs.read(b'requires').splitlines())
556 except IOError as e:
557 if e.errno != errno.ENOENT:
558 raise
559 requirements = set()
560
569
561 # The .hg/hgrc file may load extensions or contain config options
570 # The .hg/hgrc file may load extensions or contain config options
562 # that influence repository construction. Attempt to load it and
571 # that influence repository construction. Attempt to load it and
563 # process any new extensions that it may have pulled in.
572 # process any new extensions that it may have pulled in.
564 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
573 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
565 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
574 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
566 extensions.loadall(ui)
575 extensions.loadall(ui)
567 extensions.populateui(ui)
576 extensions.populateui(ui)
568
577
569 # Set of module names of extensions loaded for this repository.
578 # Set of module names of extensions loaded for this repository.
570 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
579 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
571
580
572 supportedrequirements = gathersupportedrequirements(ui)
581 supportedrequirements = gathersupportedrequirements(ui)
573
582
574 # We first validate the requirements are known.
583 # We first validate the requirements are known.
575 ensurerequirementsrecognized(requirements, supportedrequirements)
584 ensurerequirementsrecognized(requirements, supportedrequirements)
576
585
577 # Then we validate that the known set is reasonable to use together.
586 # Then we validate that the known set is reasonable to use together.
578 ensurerequirementscompatible(ui, requirements)
587 ensurerequirementscompatible(ui, requirements)
579
588
580 # TODO there are unhandled edge cases related to opening repositories with
589 # TODO there are unhandled edge cases related to opening repositories with
581 # shared storage. If storage is shared, we should also test for requirements
590 # shared storage. If storage is shared, we should also test for requirements
582 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
591 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
583 # that repo, as that repo may load extensions needed to open it. This is a
592 # that repo, as that repo may load extensions needed to open it. This is a
584 # bit complicated because we don't want the other hgrc to overwrite settings
593 # bit complicated because we don't want the other hgrc to overwrite settings
585 # in this hgrc.
594 # in this hgrc.
586 #
595 #
587 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
596 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
588 # file when sharing repos. But if a requirement is added after the share is
597 # file when sharing repos. But if a requirement is added after the share is
589 # performed, thereby introducing a new requirement for the opener, we may
598 # performed, thereby introducing a new requirement for the opener, we may
590 # will not see that and could encounter a run-time error interacting with
599 # will not see that and could encounter a run-time error interacting with
591 # that shared store since it has an unknown-to-us requirement.
600 # that shared store since it has an unknown-to-us requirement.
592
601
593 # At this point, we know we should be capable of opening the repository.
602 # At this point, we know we should be capable of opening the repository.
594 # Now get on with doing that.
603 # Now get on with doing that.
595
604
596 features = set()
605 features = set()
597
606
598 # The "store" part of the repository holds versioned data. How it is
607 # The "store" part of the repository holds versioned data. How it is
599 # accessed is determined by various requirements. If `shared` or
608 # accessed is determined by various requirements. If `shared` or
600 # `relshared` requirements are present, this indicates current repository
609 # `relshared` requirements are present, this indicates current repository
601 # is a share and store exists in path mentioned in `.hg/sharedpath`
610 # is a share and store exists in path mentioned in `.hg/sharedpath`
602 shared = b'shared' in requirements or b'relshared' in requirements
611 shared = b'shared' in requirements or b'relshared' in requirements
603 if shared:
612 if shared:
604 sharedvfs = _getsharedvfs(hgvfs, requirements)
613 sharedvfs = _getsharedvfs(hgvfs, requirements)
605 storebasepath = sharedvfs.base
614 storebasepath = sharedvfs.base
606 cachepath = sharedvfs.join(b'cache')
615 cachepath = sharedvfs.join(b'cache')
607 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
616 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
608 else:
617 else:
609 storebasepath = hgvfs.base
618 storebasepath = hgvfs.base
610 cachepath = hgvfs.join(b'cache')
619 cachepath = hgvfs.join(b'cache')
611 wcachepath = hgvfs.join(b'wcache')
620 wcachepath = hgvfs.join(b'wcache')
612
621
613 # The store has changed over time and the exact layout is dictated by
622 # The store has changed over time and the exact layout is dictated by
614 # requirements. The store interface abstracts differences across all
623 # requirements. The store interface abstracts differences across all
615 # of them.
624 # of them.
616 store = makestore(
625 store = makestore(
617 requirements,
626 requirements,
618 storebasepath,
627 storebasepath,
619 lambda base: vfsmod.vfs(base, cacheaudited=True),
628 lambda base: vfsmod.vfs(base, cacheaudited=True),
620 )
629 )
621 hgvfs.createmode = store.createmode
630 hgvfs.createmode = store.createmode
622
631
623 storevfs = store.vfs
632 storevfs = store.vfs
624 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
633 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
625
634
626 # The cache vfs is used to manage cache files.
635 # The cache vfs is used to manage cache files.
627 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
636 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
628 cachevfs.createmode = store.createmode
637 cachevfs.createmode = store.createmode
629 # The cache vfs is used to manage cache files related to the working copy
638 # The cache vfs is used to manage cache files related to the working copy
630 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
639 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
631 wcachevfs.createmode = store.createmode
640 wcachevfs.createmode = store.createmode
632
641
633 # Now resolve the type for the repository object. We do this by repeatedly
642 # Now resolve the type for the repository object. We do this by repeatedly
634 # calling a factory function to produces types for specific aspects of the
643 # calling a factory function to produces types for specific aspects of the
635 # repo's operation. The aggregate returned types are used as base classes
644 # repo's operation. The aggregate returned types are used as base classes
636 # for a dynamically-derived type, which will represent our new repository.
645 # for a dynamically-derived type, which will represent our new repository.
637
646
638 bases = []
647 bases = []
639 extrastate = {}
648 extrastate = {}
640
649
641 for iface, fn in REPO_INTERFACES:
650 for iface, fn in REPO_INTERFACES:
642 # We pass all potentially useful state to give extensions tons of
651 # We pass all potentially useful state to give extensions tons of
643 # flexibility.
652 # flexibility.
644 typ = fn()(
653 typ = fn()(
645 ui=ui,
654 ui=ui,
646 intents=intents,
655 intents=intents,
647 requirements=requirements,
656 requirements=requirements,
648 features=features,
657 features=features,
649 wdirvfs=wdirvfs,
658 wdirvfs=wdirvfs,
650 hgvfs=hgvfs,
659 hgvfs=hgvfs,
651 store=store,
660 store=store,
652 storevfs=storevfs,
661 storevfs=storevfs,
653 storeoptions=storevfs.options,
662 storeoptions=storevfs.options,
654 cachevfs=cachevfs,
663 cachevfs=cachevfs,
655 wcachevfs=wcachevfs,
664 wcachevfs=wcachevfs,
656 extensionmodulenames=extensionmodulenames,
665 extensionmodulenames=extensionmodulenames,
657 extrastate=extrastate,
666 extrastate=extrastate,
658 baseclasses=bases,
667 baseclasses=bases,
659 )
668 )
660
669
661 if not isinstance(typ, type):
670 if not isinstance(typ, type):
662 raise error.ProgrammingError(
671 raise error.ProgrammingError(
663 b'unable to construct type for %s' % iface
672 b'unable to construct type for %s' % iface
664 )
673 )
665
674
666 bases.append(typ)
675 bases.append(typ)
667
676
668 # type() allows you to use characters in type names that wouldn't be
677 # type() allows you to use characters in type names that wouldn't be
669 # recognized as Python symbols in source code. We abuse that to add
678 # recognized as Python symbols in source code. We abuse that to add
670 # rich information about our constructed repo.
679 # rich information about our constructed repo.
671 name = pycompat.sysstr(
680 name = pycompat.sysstr(
672 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
681 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
673 )
682 )
674
683
675 cls = type(name, tuple(bases), {})
684 cls = type(name, tuple(bases), {})
676
685
677 return cls(
686 return cls(
678 baseui=baseui,
687 baseui=baseui,
679 ui=ui,
688 ui=ui,
680 origroot=path,
689 origroot=path,
681 wdirvfs=wdirvfs,
690 wdirvfs=wdirvfs,
682 hgvfs=hgvfs,
691 hgvfs=hgvfs,
683 requirements=requirements,
692 requirements=requirements,
684 supportedrequirements=supportedrequirements,
693 supportedrequirements=supportedrequirements,
685 sharedpath=storebasepath,
694 sharedpath=storebasepath,
686 store=store,
695 store=store,
687 cachevfs=cachevfs,
696 cachevfs=cachevfs,
688 wcachevfs=wcachevfs,
697 wcachevfs=wcachevfs,
689 features=features,
698 features=features,
690 intents=intents,
699 intents=intents,
691 )
700 )
692
701
693
702
694 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
703 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
695 """Load hgrc files/content into a ui instance.
704 """Load hgrc files/content into a ui instance.
696
705
697 This is called during repository opening to load any additional
706 This is called during repository opening to load any additional
698 config files or settings relevant to the current repository.
707 config files or settings relevant to the current repository.
699
708
700 Returns a bool indicating whether any additional configs were loaded.
709 Returns a bool indicating whether any additional configs were loaded.
701
710
702 Extensions should monkeypatch this function to modify how per-repo
711 Extensions should monkeypatch this function to modify how per-repo
703 configs are loaded. For example, an extension may wish to pull in
712 configs are loaded. For example, an extension may wish to pull in
704 configs from alternate files or sources.
713 configs from alternate files or sources.
705 """
714 """
706 if not rcutil.use_repo_hgrc():
715 if not rcutil.use_repo_hgrc():
707 return False
716 return False
708 try:
717 try:
709 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
718 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
710 return True
719 return True
711 except IOError:
720 except IOError:
712 return False
721 return False
713
722
714
723
715 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
724 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
716 """Perform additional actions after .hg/hgrc is loaded.
725 """Perform additional actions after .hg/hgrc is loaded.
717
726
718 This function is called during repository loading immediately after
727 This function is called during repository loading immediately after
719 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
728 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
720
729
721 The function can be used to validate configs, automatically add
730 The function can be used to validate configs, automatically add
722 options (including extensions) based on requirements, etc.
731 options (including extensions) based on requirements, etc.
723 """
732 """
724
733
725 # Map of requirements to list of extensions to load automatically when
734 # Map of requirements to list of extensions to load automatically when
726 # requirement is present.
735 # requirement is present.
727 autoextensions = {
736 autoextensions = {
728 b'git': [b'git'],
737 b'git': [b'git'],
729 b'largefiles': [b'largefiles'],
738 b'largefiles': [b'largefiles'],
730 b'lfs': [b'lfs'],
739 b'lfs': [b'lfs'],
731 }
740 }
732
741
733 for requirement, names in sorted(autoextensions.items()):
742 for requirement, names in sorted(autoextensions.items()):
734 if requirement not in requirements:
743 if requirement not in requirements:
735 continue
744 continue
736
745
737 for name in names:
746 for name in names:
738 if not ui.hasconfig(b'extensions', name):
747 if not ui.hasconfig(b'extensions', name):
739 ui.setconfig(b'extensions', name, b'', source=b'autoload')
748 ui.setconfig(b'extensions', name, b'', source=b'autoload')
740
749
741
750
742 def gathersupportedrequirements(ui):
751 def gathersupportedrequirements(ui):
743 """Determine the complete set of recognized requirements."""
752 """Determine the complete set of recognized requirements."""
744 # Start with all requirements supported by this file.
753 # Start with all requirements supported by this file.
745 supported = set(localrepository._basesupported)
754 supported = set(localrepository._basesupported)
746
755
747 # Execute ``featuresetupfuncs`` entries if they belong to an extension
756 # Execute ``featuresetupfuncs`` entries if they belong to an extension
748 # relevant to this ui instance.
757 # relevant to this ui instance.
749 modules = {m.__name__ for n, m in extensions.extensions(ui)}
758 modules = {m.__name__ for n, m in extensions.extensions(ui)}
750
759
751 for fn in featuresetupfuncs:
760 for fn in featuresetupfuncs:
752 if fn.__module__ in modules:
761 if fn.__module__ in modules:
753 fn(ui, supported)
762 fn(ui, supported)
754
763
755 # Add derived requirements from registered compression engines.
764 # Add derived requirements from registered compression engines.
756 for name in util.compengines:
765 for name in util.compengines:
757 engine = util.compengines[name]
766 engine = util.compengines[name]
758 if engine.available() and engine.revlogheader():
767 if engine.available() and engine.revlogheader():
759 supported.add(b'exp-compression-%s' % name)
768 supported.add(b'exp-compression-%s' % name)
760 if engine.name() == b'zstd':
769 if engine.name() == b'zstd':
761 supported.add(b'revlog-compression-zstd')
770 supported.add(b'revlog-compression-zstd')
762
771
763 return supported
772 return supported
764
773
765
774
766 def ensurerequirementsrecognized(requirements, supported):
775 def ensurerequirementsrecognized(requirements, supported):
767 """Validate that a set of local requirements is recognized.
776 """Validate that a set of local requirements is recognized.
768
777
769 Receives a set of requirements. Raises an ``error.RepoError`` if there
778 Receives a set of requirements. Raises an ``error.RepoError`` if there
770 exists any requirement in that set that currently loaded code doesn't
779 exists any requirement in that set that currently loaded code doesn't
771 recognize.
780 recognize.
772
781
773 Returns a set of supported requirements.
782 Returns a set of supported requirements.
774 """
783 """
775 missing = set()
784 missing = set()
776
785
777 for requirement in requirements:
786 for requirement in requirements:
778 if requirement in supported:
787 if requirement in supported:
779 continue
788 continue
780
789
781 if not requirement or not requirement[0:1].isalnum():
790 if not requirement or not requirement[0:1].isalnum():
782 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
791 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
783
792
784 missing.add(requirement)
793 missing.add(requirement)
785
794
786 if missing:
795 if missing:
787 raise error.RequirementError(
796 raise error.RequirementError(
788 _(b'repository requires features unknown to this Mercurial: %s')
797 _(b'repository requires features unknown to this Mercurial: %s')
789 % b' '.join(sorted(missing)),
798 % b' '.join(sorted(missing)),
790 hint=_(
799 hint=_(
791 b'see https://mercurial-scm.org/wiki/MissingRequirement '
800 b'see https://mercurial-scm.org/wiki/MissingRequirement '
792 b'for more information'
801 b'for more information'
793 ),
802 ),
794 )
803 )
795
804
796
805
797 def ensurerequirementscompatible(ui, requirements):
806 def ensurerequirementscompatible(ui, requirements):
798 """Validates that a set of recognized requirements is mutually compatible.
807 """Validates that a set of recognized requirements is mutually compatible.
799
808
800 Some requirements may not be compatible with others or require
809 Some requirements may not be compatible with others or require
801 config options that aren't enabled. This function is called during
810 config options that aren't enabled. This function is called during
802 repository opening to ensure that the set of requirements needed
811 repository opening to ensure that the set of requirements needed
803 to open a repository is sane and compatible with config options.
812 to open a repository is sane and compatible with config options.
804
813
805 Extensions can monkeypatch this function to perform additional
814 Extensions can monkeypatch this function to perform additional
806 checking.
815 checking.
807
816
808 ``error.RepoError`` should be raised on failure.
817 ``error.RepoError`` should be raised on failure.
809 """
818 """
810 if b'exp-sparse' in requirements and not sparse.enabled:
819 if b'exp-sparse' in requirements and not sparse.enabled:
811 raise error.RepoError(
820 raise error.RepoError(
812 _(
821 _(
813 b'repository is using sparse feature but '
822 b'repository is using sparse feature but '
814 b'sparse is not enabled; enable the '
823 b'sparse is not enabled; enable the '
815 b'"sparse" extensions to access'
824 b'"sparse" extensions to access'
816 )
825 )
817 )
826 )
818
827
819
828
820 def makestore(requirements, path, vfstype):
829 def makestore(requirements, path, vfstype):
821 """Construct a storage object for a repository."""
830 """Construct a storage object for a repository."""
822 if b'store' in requirements:
831 if b'store' in requirements:
823 if b'fncache' in requirements:
832 if b'fncache' in requirements:
824 return storemod.fncachestore(
833 return storemod.fncachestore(
825 path, vfstype, b'dotencode' in requirements
834 path, vfstype, b'dotencode' in requirements
826 )
835 )
827
836
828 return storemod.encodedstore(path, vfstype)
837 return storemod.encodedstore(path, vfstype)
829
838
830 return storemod.basicstore(path, vfstype)
839 return storemod.basicstore(path, vfstype)
831
840
832
841
833 def resolvestorevfsoptions(ui, requirements, features):
842 def resolvestorevfsoptions(ui, requirements, features):
834 """Resolve the options to pass to the store vfs opener.
843 """Resolve the options to pass to the store vfs opener.
835
844
836 The returned dict is used to influence behavior of the storage layer.
845 The returned dict is used to influence behavior of the storage layer.
837 """
846 """
838 options = {}
847 options = {}
839
848
840 if b'treemanifest' in requirements:
849 if b'treemanifest' in requirements:
841 options[b'treemanifest'] = True
850 options[b'treemanifest'] = True
842
851
843 # experimental config: format.manifestcachesize
852 # experimental config: format.manifestcachesize
844 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
853 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
845 if manifestcachesize is not None:
854 if manifestcachesize is not None:
846 options[b'manifestcachesize'] = manifestcachesize
855 options[b'manifestcachesize'] = manifestcachesize
847
856
848 # In the absence of another requirement superseding a revlog-related
857 # In the absence of another requirement superseding a revlog-related
849 # requirement, we have to assume the repo is using revlog version 0.
858 # requirement, we have to assume the repo is using revlog version 0.
850 # This revlog format is super old and we don't bother trying to parse
859 # This revlog format is super old and we don't bother trying to parse
851 # opener options for it because those options wouldn't do anything
860 # opener options for it because those options wouldn't do anything
852 # meaningful on such old repos.
861 # meaningful on such old repos.
853 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
862 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
854 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
863 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
855 else: # explicitly mark repo as using revlogv0
864 else: # explicitly mark repo as using revlogv0
856 options[b'revlogv0'] = True
865 options[b'revlogv0'] = True
857
866
858 if COPIESSDC_REQUIREMENT in requirements:
867 if COPIESSDC_REQUIREMENT in requirements:
859 options[b'copies-storage'] = b'changeset-sidedata'
868 options[b'copies-storage'] = b'changeset-sidedata'
860 else:
869 else:
861 writecopiesto = ui.config(b'experimental', b'copies.write-to')
870 writecopiesto = ui.config(b'experimental', b'copies.write-to')
862 copiesextramode = (b'changeset-only', b'compatibility')
871 copiesextramode = (b'changeset-only', b'compatibility')
863 if writecopiesto in copiesextramode:
872 if writecopiesto in copiesextramode:
864 options[b'copies-storage'] = b'extra'
873 options[b'copies-storage'] = b'extra'
865
874
866 return options
875 return options
867
876
868
877
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


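# Illustrative sketch (assumed config, not part of this module): the zstd
# guard above means an out-of-range level aborts before any revlog is opened:
#
#   [storage]
#   revlog.zstd.level = 25
#
#   -> Abort: invalid value for `storage.revlog.zstd.level` config: 25

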
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


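# Illustrative sketch (hypothetical usage; the path is invented): ``file()``
# strips a single leading slash, so both spellings below address the same
# filelog:
#
#   repo.file(b'dir/a.txt')
#   repo.file(b'/dir/a.txt')

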
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


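# Illustrative sketch (assumed values): the factory keys off the narrow
# requirement when choosing the storage class:
#
#   reqs = {b'revlogv1', repository.NARROW_REQUIREMENT}
#   assert makefilestorage(requirements=reqs, features=set()) is (
#       revlognarrowfilestorage
#   )

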
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


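# Illustrative sketch (simplified and hypothetical; the real composition
# happens in ``makelocalrepository()`` and passes much more state): each
# factory contributes a base class and the final repository type is derived
# from all of them, roughly:
#
#   bases = [fn()(...) for _iface, fn in REPO_INTERFACES]
#   cls = type('derivedrepo', tuple(bases), {})

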
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

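    # Illustrative sketch (assumed config and output; the path is invented):
    # with `[devel] check-locks = yes`, an unlocked write through self.vfs to
    # e.g. 'journal.dirstate' would trigger a warning along the lines of:
    #
    #   devel-warn: write with no lock: "journal.dirstate"
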
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

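    # Illustrative sketch (hypothetical usage, not part of this module):
    # because filtering is not recursive, both expressions below yield a
    # "served" view of the same unfiltered repository:
    #
    #   repo.filtered(b'served')
    #   repo.filtered(b'visible').filtered(b'served')
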
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race (see issue6303)
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

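    # Illustrative sketch (hypothetical usage; the pattern is invented):
    # intersecting an explicit matcher with the narrowspec while keeping
    # exact paths so they can be warned about later:
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   nm = repo.narrowmatch(m, includeexact=True)
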
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

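    # Illustrative sketch (hypothetical usage) of the lookups handled above:
    #
    #   repo[None]      # working directory context
    #   repo[0]         # integer revision
    #   repo[b'.']      # dirstate parent, via the quick-access fast path
    #   repo[b'tip']    # symbolic name
    #   repo[0:3]       # slice -> list of changectx, filtered revs skipped
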
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

1720 def revs(self, expr, *args):
1729 def revs(self, expr, *args):
1721 '''Find revisions matching a revset.
1730 '''Find revisions matching a revset.
1722
1731
1723 The revset is specified as a string ``expr`` that may contain
1732 The revset is specified as a string ``expr`` that may contain
1724 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1733 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1725
1734
1726 Revset aliases from the configuration are not expanded. To expand
1735 Revset aliases from the configuration are not expanded. To expand
1727 user aliases, consider calling ``scmutil.revrange()`` or
1736 user aliases, consider calling ``scmutil.revrange()`` or
1728 ``repo.anyrevs([expr], user=True)``.
1737 ``repo.anyrevs([expr], user=True)``.
1729
1738
1730 Returns a smartset.abstractsmartset, which is a list-like interface
1739 Returns a smartset.abstractsmartset, which is a list-like interface
1731 that contains integer revisions.
1740 that contains integer revisions.
1732 '''
1741 '''
1733 tree = revsetlang.spectree(expr, *args)
1742 tree = revsetlang.spectree(expr, *args)
1734 return revset.makematcher(tree)(self)
1743 return revset.makematcher(tree)(self)
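
    # Illustrative call (a sketch; the expression is hypothetical):
    # ``repo.revs(b'ancestors(%d) and not %ld', rev, excluded)`` lets
    # ``revsetlang.formatspec`` quote the arguments (%d: int, %s: bytes,
    # %ld: list of ints) instead of splicing them into the string by hand.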

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
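
    # Illustrative call (a sketch; names are hypothetical):
    # ``repo.anyrevs([b'heads(mybranch)'], user=True,
    # localalias={b'mybranch': b'branch(default)'})`` evaluates the spec
    # with user aliases enabled plus a local override for ``mybranch``.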

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
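
    # Illustrative call (a sketch; the hook name is hypothetical):
    # ``repo.hook(b'myext-done', throw=False, result=ret)`` fires a custom
    # hook; keyword arguments are exposed to shell hooks as ``HG_*``
    # environment variables.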

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
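
    # Note (illustrative): the mapping always contains b'tip', so
    # ``repo.tags()[b'tip'] == repo.changelog.tip()`` is expected to hold.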

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
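
    # Illustrative call (a sketch): ``repo.known([repo.changelog.tip(),
    # b'\xff' * 20])`` would return ``[True, False]``, assuming no changeset
    # has the second binary node -- one boolean per node, False for unknown
    # or filtered nodes.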

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
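
    # Illustrative configuration (a sketch; the pattern and command are
    # hypothetical): an hgrc section such as
    #
    #   [encode]
    #   **.dat = pipe: gzip -d
    #
    # is what ``_loadfilter(b'encode')`` parses; entries whose command is
    # ``!`` are skipped, and commands starting with a registered data filter
    # name dispatch to that filter instead of an external command.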

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
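
    # Illustrative call (a sketch): ``repo.wwrite(b'foo.txt', data, b'')``
    # writes a regular file, ``flags=b'x'`` also marks it executable, and
    # ``flags=b'l'`` creates a symlink whose target is ``data``.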

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
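
    # Illustrative usage (a sketch; the description is hypothetical): callers
    # open a transaction while holding the store lock, typically as a context
    # manager::
    #
    #   with repo.lock(), repo.transaction(b'my-operation') as tr:
    #       ...  # journaled writes; an exception aborts and rolls back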

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
2568
2577
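    # Illustrative usage sketch (not part of the original module): warming
    # every cache this function knows about, roughly what the
    # `hg debugupdatecaches` command does:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)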
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

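    # Illustrative sketch (not part of the original module): a caller that
    # suspects another process modified .hg behind its back can drop cached
    # state so the next access re-reads from disk:
    #
    #     repo.invalidate()          # store and non-store file caches
    #     repo.invalidatedirstate()  # dirstate is handled separately
    #
    # invalidateall() below bundles exactly these two calls.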
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found
            callback(True)

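    # Illustrative sketch (not part of the original module): commit() and
    # pushkey() below use _afterlock() to defer their final hooks until all
    # locks are released:
    #
    #     def runhook(unused_success):
    #         repo.hook(b'myhook')  # b'myhook' is a hypothetical hook name
    #
    #     repo._afterlock(runhook)
    #
    # If no lock is currently held, the callback runs immediately.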
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

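    # Illustrative sketch (not part of the original module): the documented
    # lock ordering -- wlock before lock, both before the transaction -- as
    # commit() below applies it:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'example'):  # b'example' is a made-up
    #             ...                             # transaction description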
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

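    # Illustrative usage sketch (not part of the original module): a plain
    # programmatic commit of everything modified in the working directory;
    # the message and user are made up:
    #
    #     node = repo.commit(text=b'fix the frobnicator',
    #                        user=b'Jane Doe <jane@example.com>')
    #     if node is None:
    #         ...  # nothing to commit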
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

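    # Illustrative usage sketch (not part of the original module): comparing
    # the working directory against its first parent (the default):
    #
    #     st = repo.status(ignored=True, unknown=True)
    #     for f in st.modified:
    #         ...  # st also exposes .added, .removed, .deleted, .unknown, ...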
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

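    # Note (added explanation, not in the original module): between() walks
    # first parents from 'top' towards 'bottom' and samples the nodes at
    # exponentially growing distances 1, 2, 4, 8, ... from the top; the old
    # discovery wire protocol uses this to bisect a range cheaply:
    #
    #     samples = repo.between([(topnode, bottomnode)])[0]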
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        carrying repo, remote and outgoing attributes, before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

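    # Illustrative usage sketch (not part of the original module): moving a
    # bookmark through the pushkey protocol; 'newnode' is a made-up node:
    #
    #     ok = repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
    #     # ok is False if a prepushkey hook aborted the change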
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


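# Illustrative usage sketch (not part of the original module): commit() above
# stores the pending message before opening its transaction, so a rolled-back
# message survives in .hg/last-message.txt:
#
#     msgfn = repo.savecommitmessage(b'draft message')
#     # msgfn is the user-facing path of .hg/last-message.txt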
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


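# Illustrative note (assumption based on how localrepo wires transactions):
# aftertrans() is passed as the 'after' callback when the repository opens a
# transaction, so a successful close renames journal files into undo files,
# roughly:
#
#     after = aftertrans([(repo.svfs, b'journal', b'undo')])
#     after()  # renames .hg/store/journal to .hg/store/undo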
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


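# Worked example (added, not in the original module):
#
#     undoname(b'.hg/store/journal')            -> b'.hg/store/undo'
#     undoname(b'.hg/store/journal.phaseroots') -> b'.hg/store/undo.phaseroots'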
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements


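# Illustrative note (not part of the original module): with stock defaults on
# a zlib build, the returned set typically looks like:
#
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      b'sparserevlog'}
#
# where b'sparserevlog' is the value of SPARSEREVLOG_REQUIREMENT.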
3335 def checkrequirementscompat(ui, requirements):
3344 def checkrequirementscompat(ui, requirements):
3336 """ Checks compatibility of repository requirements enabled and disabled.
3345 """ Checks compatibility of repository requirements enabled and disabled.
3337
3346
3338 Returns a set of requirements which needs to be dropped because dependend
3347 Returns a set of requirements which needs to be dropped because dependend
3339 requirements are not enabled. Also warns users about it """
3348 requirements are not enabled. Also warns users about it """
3340
3349
3341 dropped = set()
3350 dropped = set()
3342
3351
3343 if b'store' not in requirements:
3352 if b'store' not in requirements:
3344 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3353 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3345 ui.warn(
3354 ui.warn(
3346 _(
3355 _(
3347 b'ignoring enabled \'format.bookmarks-in-store\' config '
3356 b'ignoring enabled \'format.bookmarks-in-store\' config '
3348 b'beacuse it is incompatible with disabled '
3357 b'beacuse it is incompatible with disabled '
3349 b'\'format.usestore\' config\n'
3358 b'\'format.usestore\' config\n'
3350 )
3359 )
3351 )
3360 )
3352 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3361 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3353
3362
3354 if b'shared' in requirements or b'relshared' in requirements:
3363 if b'shared' in requirements or b'relshared' in requirements:
3355 raise error.Abort(
3364 raise error.Abort(
3356 _(
3365 _(
3357 b"cannot create shared repository as source was created"
3366 b"cannot create shared repository as source was created"
3358 b" with 'format.usestore' config disabled"
3367 b" with 'format.usestore' config disabled"
3359 )
3368 )
3360 )
3369 )
3361
3370
3362 return dropped
3371 return dropped
3363
3372
3364
3373
3365 def filterknowncreateopts(ui, createopts):
3374 def filterknowncreateopts(ui, createopts):
3366 """Filters a dict of repo creation options against options that are known.
3375 """Filters a dict of repo creation options against options that are known.
3367
3376
3368 Receives a dict of repo creation options and returns a dict of those
3377 Receives a dict of repo creation options and returns a dict of those
3369 options that we don't know how to handle.
3378 options that we don't know how to handle.
3370
3379
3371 This function is called as part of repository creation. If the
3380 This function is called as part of repository creation. If the
3372 returned dict contains any items, repository creation will not
3381 returned dict contains any items, repository creation will not
3373 be allowed, as it means there was a request to create a repository
3382 be allowed, as it means there was a request to create a repository
3374 with options not recognized by loaded code.
3383 with options not recognized by loaded code.
3375
3384
3376 Extensions can wrap this function to filter out creation options
3385 Extensions can wrap this function to filter out creation options
3377 they know how to handle.
3386 they know how to handle.
3378 """
3387 """
3379 known = {
3388 known = {
3380 b'backend',
3389 b'backend',
3381 b'lfs',
3390 b'lfs',
3382 b'narrowfiles',
3391 b'narrowfiles',
3383 b'sharedrepo',
3392 b'sharedrepo',
3384 b'sharedrelative',
3393 b'sharedrelative',
3385 b'shareditems',
3394 b'shareditems',
3386 b'shallowfilestore',
3395 b'shallowfilestore',
3387 }
3396 }
3388
3397
3389 return {k: v for k, v in createopts.items() if k not in known}
3398 return {k: v for k, v in createopts.items() if k not in known}
3390
3399
3391
3400
3392 def createrepository(ui, path, createopts=None):
3401 def createrepository(ui, path, createopts=None):
3393 """Create a new repository in a vfs.
3402 """Create a new repository in a vfs.
3394
3403
3395 ``path`` path to the new repo's working directory.
3404 ``path`` path to the new repo's working directory.
3396 ``createopts`` options for the new repository.
3405 ``createopts`` options for the new repository.
3397
3406
3398 The following keys for ``createopts`` are recognized:
3407 The following keys for ``createopts`` are recognized:
3399
3408
3400 backend
3409 backend
3401 The storage backend to use.
3410 The storage backend to use.
3402 lfs
3411 lfs
3403 Repository will be created with ``lfs`` requirement. The lfs extension
3412 Repository will be created with ``lfs`` requirement. The lfs extension
3404 will automatically be loaded when the repository is accessed.
3413 will automatically be loaded when the repository is accessed.
3405 narrowfiles
3414 narrowfiles
3406 Set up repository to support narrow file storage.
3415 Set up repository to support narrow file storage.
3407 sharedrepo
3416 sharedrepo
3408 Repository object from which storage should be shared.
3417 Repository object from which storage should be shared.
3409 sharedrelative
3418 sharedrelative
3410 Boolean indicating if the path to the shared repo should be
3419 Boolean indicating if the path to the shared repo should be
3411 stored as relative. By default, the pointer to the "parent" repo
3420 stored as relative. By default, the pointer to the "parent" repo
3412 is stored as an absolute path.
3421 is stored as an absolute path.
3413 shareditems
3422 shareditems
3414 Set of items to share to the new repository (in addition to storage).
3423 Set of items to share to the new repository (in addition to storage).
3415 shallowfilestore
3424 shallowfilestore
3416 Indicates that storage for files should be shallow (not all ancestor
3425 Indicates that storage for files should be shallow (not all ancestor
3417 revisions are known).
3426 revisions are known).
3418 """
3427 """
3419 createopts = defaultcreateopts(ui, createopts=createopts)
3428 createopts = defaultcreateopts(ui, createopts=createopts)
3420
3429
3421 unknownopts = filterknowncreateopts(ui, createopts)
3430 unknownopts = filterknowncreateopts(ui, createopts)
3422
3431
3423 if not isinstance(unknownopts, dict):
3432 if not isinstance(unknownopts, dict):
3424 raise error.ProgrammingError(
3433 raise error.ProgrammingError(
3425 b'filterknowncreateopts() did not return a dict'
3434 b'filterknowncreateopts() did not return a dict'
3426 )
3435 )
3427
3436
3428 if unknownopts:
3437 if unknownopts:
3429 raise error.Abort(
3438 raise error.Abort(
3430 _(
3439 _(
3431 b'unable to create repository because of unknown '
3440 b'unable to create repository because of unknown '
3432 b'creation option: %s'
3441 b'creation option: %s'
3433 )
3442 )
3434 % b', '.join(sorted(unknownopts)),
3443 % b', '.join(sorted(unknownopts)),
3435 hint=_(b'is a required extension not loaded?'),
3444 hint=_(b'is a required extension not loaded?'),
3436 )
3445 )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )
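            # Hedged illustration (assumption): with the parent store at
            # b'/srv/parent/.hg' and this repository at b'/srv/child/.hg',
            # os.path.relpath() yields b'../../parent/.hg'; on Windows,
            # paths on different drives (C: vs. D:) raise ValueError.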

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )
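
        # Hedged illustration (assumption): old clients parse the first four
        # bytes as a big-endian header whose low 16 bits carry the revlog
        # version:
        #
        #   >>> import struct
        #   >>> struct.unpack('>I', b'\x00\x00\x00\x02')[0] & 0xFFFF
        #   2
        #
        # Version 2 is unknown to those clients, so they refuse to read
        # the changelog.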

    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
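        # Hedged example (assumption): with shareditems == {b'bookmarks'},
        # the resulting ``.hg/shared`` file contains the single line
        # ``bookmarks`` terminated by a newline.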


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
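
# Hedged usage sketch (assumption, not original code): once poisoned, only
# close() remains callable on the instance:
#
#   poisonrepository(repo)
#   repo.close()     # permitted; close() is special-cased above
#   repo.status()    # raises error.ProgrammingError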