localrepo: use functools.wraps() in unfilteredmethod decorator...
Augie Fackler
r45987:4111954c default
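
A note on the change: without functools.wraps(), the wrapper returned by
unfilteredmethod reports its own metadata (__name__ is 'wrapper' and the
docstring is lost), which muddies introspection, profiling output, and
generated documentation. functools.wraps() copies __module__, __name__,
__qualname__, __doc__, and __dict__ from the wrapped function onto the
wrapper and sets __wrapped__. A minimal standalone sketch of the effect
(illustration only, not part of the changeset; demo() and the
*_without_wraps/*_with_wraps names are hypothetical):

    import functools

    def unfilteredmethod_without_wraps(orig):
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    def unfilteredmethod_with_wraps(orig):
        @functools.wraps(orig)  # copy __name__, __doc__, etc. from orig
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    def demo(repo):
        """hypothetical repo method"""

    print(unfilteredmethod_without_wraps(demo).__name__)  # -> 'wrapper'
    print(unfilteredmethod_with_wraps(demo).__name__)     # -> 'demo'
    print(unfilteredmethod_with_wraps(demo).__doc__)      # -> 'hypothetical repo method'
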
@@ -1,3519 +1,3521 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
+import functools
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 
 from .interfaces import (
     repository,
     util as interfaceutil,
 )
 
 from .utils import (
     hashutil,
     procutil,
     stringutil,
 )
 
 from .revlogutils import constants as revlogconst
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 
 class _basefilecache(scmutil.filecache):
97 """All filecache usage on repo are done for logic that should be unfiltered
98 """All filecache usage on repo are done for logic that should be unfiltered
98 """
99 """
 
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)
 
     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)
 
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
 
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
 
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 
 class mixedrepostorecache(_basefilecache):
140 """filecache for a mix files in .hg/store and outside"""
141 """filecache for a mix files in .hg/store and outside"""
 
     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)
 
     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)
 
 
 def isfilecached(repo, name):
161 """check if a repo has already cached "name" filecache-ed property
162 """check if a repo has already cached "name" filecache-ed property
 
     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True
 
 
 class unfilteredpropertycache(util.propertycache):
172 """propertycache that apply to unfiltered repo only"""
173 """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 
 class filteredpropertycache(util.propertycache):
182 """propertycache that must take filtering in account"""
183 """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 
 def unfilteredmethod(orig):
194 """decorate method that always need to be run on unfiltered version"""
195 """decorate method that always need to be run on unfiltered version"""
 
+    @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
 
     return wrapper
 
 
 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})
 
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
 
     def __enter__(self):
         return self
 
     def __exit__(self, exctype, excvalue, exctb):
         self.close()
 
     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )
 
         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))
 
         f = pycompat.futures.Future()
 
         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)
 
         return f
 
     def sendcommands(self):
         self._sent = True
 
     def close(self):
         self._closed = True
 
 
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
 
     def url(self):
         return self._repo.url()
 
     def local(self):
         return self._repo
 
     def peer(self):
         return self
 
     def canpush(self):
         return True
 
     def close(self):
         self._repo.close()
 
     # End of _basepeer interface.
 
     # Begin of _basewirecommands interface.
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def capabilities(self):
         return self._caps
 
     def clonebundles(self):
         return self._repo.tryread(b'clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return b"%s %s %s %s %s" % (
             one,
             two,
             pycompat.bytestr(three),
             pycompat.bytestr(four),
             pycompat.bytestr(five),
         )
 
     def getbundle(
         self, source, heads=None, common=None, bundlecaps=None, **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
             source,
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler(b'01', cb, None)
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
         raise error.Abort(_(b'cannot perform stream clone against local peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                 if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(
                 _(b'push failed:'), stringutil.forcebytestr(exc)
             )
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def commandexecutor(self):
         return localcommandexecutor(self)
 
     # End of peer interface.
 
 
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=bases, ancestorsof=heads
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
 
     # End of baselegacywirecommands interface.
 
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()
 
 
 def _getsharedvfs(hgvfs, requirements):
     """ returns the vfs object pointing to root of shared source
     repo for a shared repository
 
     hgvfs is vfs pointing at .hg/ of current repo (shared one)
     requirements is a set of requirements of current repo (shared one)
     """
     # The ``shared`` or ``relshared`` requirements indicate the
     # store lives in the path contained in the ``.hg/sharedpath`` file.
     # This is an absolute path for ``shared`` and relative to
     # ``.hg/`` for ``relshared``.
     sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
     if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
         sharedpath = hgvfs.join(sharedpath)
 
     sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
 
     if not sharedvfs.exists():
         raise error.RepoError(
             _(b'.hg/sharedpath points to nonexistent directory %s')
             % sharedvfs.base
         )
     return sharedvfs
 
 
 def _readrequires(vfs, allowmissing):
465 """ reads the require file present at root of this vfs
467 """ reads the require file present at root of this vfs
466 and return a set of requirements
468 and return a set of requirements
 
     If allowmissing is True, we suppress ENOENT if raised"""
     # requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     try:
         requirements = set(vfs.read(b'requires').splitlines())
     except IOError as e:
         if not (allowmissing and e.errno == errno.ENOENT):
             raise
         requirements = set()
     return requirements
 
 
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
     Given arguments needed to construct a local repository, this function
     performs various early repository loading functionality (such as
     reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
     the repository can be opened, derives a type suitable for representing
     that repository, and returns an instance of it.
 
     The returned object conforms to the ``repository.completelocalrepository``
     interface.
 
     The repository type is derived by calling a series of factory functions
     for each aspect/interface of the final repository. These are defined by
     ``REPO_INTERFACES``.
 
     Each factory function is called to produce a type implementing a specific
     interface. The cumulative list of returned types will be combined into a
     new type and that type will be instantiated to represent the local
     repository.
 
     The factory functions each receive various state that may be consulted
     as part of deriving a type.
 
     Extensions should wrap these factory functions to customize repository type
     creation. Note that an extension's wrapped function may be called even if
     that extension is not loaded for the repo being constructed. Extensions
     should check if their ``__name__`` appears in the
     ``extensionmodulenames`` set passed to the factory function and no-op if
     not.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy
 
     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
 
     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
     # Whether this repository is a shared one or not
     shared = False
     # If this repository is shared, vfs pointing to shared repo
     sharedvfs = None
 
     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise
         except ValueError as e:
             # Can be raised on Python 3.8 when path is invalid.
             raise error.Abort(
                 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
             )
 
         raise error.RepoError(_(b'repository %s not found') % path)
 
     requirements = _readrequires(hgvfs, True)
 
     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     if loadhgrc(ui, wdirvfs, hgvfs, requirements):
         afterhgrcload(ui, wdirvfs, hgvfs, requirements)
         extensions.loadall(ui)
         extensions.populateui(ui)
 
     # Set of module names of extensions loaded for this repository.
     extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
 
     supportedrequirements = gathersupportedrequirements(ui)
 
     # We first validate the requirements are known.
     ensurerequirementsrecognized(requirements, supportedrequirements)
 
     # Then we validate that the known set is reasonable to use together.
     ensurerequirementscompatible(ui, requirements)
 
     # TODO there are unhandled edge cases related to opening repositories with
     # shared storage. If storage is shared, we should also test for requirements
     # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
     # that repo, as that repo may load extensions needed to open it. This is a
     # bit complicated because we don't want the other hgrc to overwrite settings
     # in this hgrc.
     #
     # This bug is somewhat mitigated by the fact that we copy the .hg/requires
     # file when sharing repos. But if a requirement is added after the share is
     # performed, thereby introducing a new requirement for the opener, we may
     # not see that and could encounter a run-time error interacting with
     # that shared store since it has an unknown-to-us requirement.
 
     # At this point, we know we should be capable of opening the repository.
     # Now get on with doing that.
 
     features = set()
 
     # The "store" part of the repository holds versioned data. How it is
     # accessed is determined by various requirements. If `shared` or
     # `relshared` requirements are present, this indicates the current
     # repository is a share and the store exists in the path mentioned in
     # `.hg/sharedpath`.
     shared = (
         requirementsmod.SHARED_REQUIREMENT in requirements
         or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
     )
     if shared:
         sharedvfs = _getsharedvfs(hgvfs, requirements)
         storebasepath = sharedvfs.base
         cachepath = sharedvfs.join(b'cache')
         features.add(repository.REPO_FEATURE_SHARED_STORAGE)
     else:
         storebasepath = hgvfs.base
         cachepath = hgvfs.join(b'cache')
     wcachepath = hgvfs.join(b'wcache')
 
     # The store has changed over time and the exact layout is dictated by
     # requirements. The store interface abstracts differences across all
     # of them.
     store = makestore(
         requirements,
         storebasepath,
         lambda base: vfsmod.vfs(base, cacheaudited=True),
     )
     hgvfs.createmode = store.createmode
 
     storevfs = store.vfs
     storevfs.options = resolvestorevfsoptions(ui, requirements, features)
 
     # The cache vfs is used to manage cache files.
     cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
     cachevfs.createmode = store.createmode
     # The cache vfs is used to manage cache files related to the working copy
     wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
     wcachevfs.createmode = store.createmode
 
     # Now resolve the type for the repository object. We do this by repeatedly
     # calling a factory function to produce types for specific aspects of the
624 # repo's operation. The aggregate returned types are used as base classes
626 # repo's operation. The aggregate returned types are used as base classes
625 # for a dynamically-derived type, which will represent our new repository.
627 # for a dynamically-derived type, which will represent our new repository.
626
628
627 bases = []
629 bases = []
628 extrastate = {}
630 extrastate = {}
629
631
630 for iface, fn in REPO_INTERFACES:
632 for iface, fn in REPO_INTERFACES:
631 # We pass all potentially useful state to give extensions tons of
633 # We pass all potentially useful state to give extensions tons of
632 # flexibility.
634 # flexibility.
633 typ = fn()(
635 typ = fn()(
634 ui=ui,
636 ui=ui,
635 intents=intents,
637 intents=intents,
636 requirements=requirements,
638 requirements=requirements,
637 features=features,
639 features=features,
638 wdirvfs=wdirvfs,
640 wdirvfs=wdirvfs,
639 hgvfs=hgvfs,
641 hgvfs=hgvfs,
640 store=store,
642 store=store,
641 storevfs=storevfs,
643 storevfs=storevfs,
642 storeoptions=storevfs.options,
644 storeoptions=storevfs.options,
643 cachevfs=cachevfs,
645 cachevfs=cachevfs,
644 wcachevfs=wcachevfs,
646 wcachevfs=wcachevfs,
645 extensionmodulenames=extensionmodulenames,
647 extensionmodulenames=extensionmodulenames,
646 extrastate=extrastate,
648 extrastate=extrastate,
647 baseclasses=bases,
649 baseclasses=bases,
648 )
650 )
649
651
650 if not isinstance(typ, type):
652 if not isinstance(typ, type):
651 raise error.ProgrammingError(
653 raise error.ProgrammingError(
652 b'unable to construct type for %s' % iface
654 b'unable to construct type for %s' % iface
653 )
655 )
654
656
655 bases.append(typ)
657 bases.append(typ)
656
658
657 # type() allows you to use characters in type names that wouldn't be
659 # type() allows you to use characters in type names that wouldn't be
658 # recognized as Python symbols in source code. We abuse that to add
660 # recognized as Python symbols in source code. We abuse that to add
659 # rich information about our constructed repo.
661 # rich information about our constructed repo.
660 name = pycompat.sysstr(
662 name = pycompat.sysstr(
661 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
663 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
662 )
664 )
663
665
664 cls = type(name, tuple(bases), {})
666 cls = type(name, tuple(bases), {})
665
667
666 return cls(
668 return cls(
667 baseui=baseui,
669 baseui=baseui,
668 ui=ui,
670 ui=ui,
669 origroot=path,
671 origroot=path,
670 wdirvfs=wdirvfs,
672 wdirvfs=wdirvfs,
671 hgvfs=hgvfs,
673 hgvfs=hgvfs,
672 requirements=requirements,
674 requirements=requirements,
673 supportedrequirements=supportedrequirements,
675 supportedrequirements=supportedrequirements,
674 sharedpath=storebasepath,
676 sharedpath=storebasepath,
675 store=store,
677 store=store,
676 cachevfs=cachevfs,
678 cachevfs=cachevfs,
677 wcachevfs=wcachevfs,
679 wcachevfs=wcachevfs,
678 features=features,
680 features=features,
679 intents=intents,
681 intents=intents,
680 )
682 )
681
683
682
684
683 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
685 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
684 """Load hgrc files/content into a ui instance.
686 """Load hgrc files/content into a ui instance.
685
687
686 This is called during repository opening to load any additional
688 This is called during repository opening to load any additional
687 config files or settings relevant to the current repository.
689 config files or settings relevant to the current repository.
688
690
689 Returns a bool indicating whether any additional configs were loaded.
691 Returns a bool indicating whether any additional configs were loaded.
690
692
691 Extensions should monkeypatch this function to modify how per-repo
693 Extensions should monkeypatch this function to modify how per-repo
692 configs are loaded. For example, an extension may wish to pull in
694 configs are loaded. For example, an extension may wish to pull in
693 configs from alternate files or sources.
695 configs from alternate files or sources.
694 """
696 """
695 if not rcutil.use_repo_hgrc():
697 if not rcutil.use_repo_hgrc():
696 return False
698 return False
697 try:
699 try:
698 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
700 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
699 return True
701 return True
700 except IOError:
702 except IOError:
701 return False
703 return False
702
704
703
705
704 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
706 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
705 """Perform additional actions after .hg/hgrc is loaded.
707 """Perform additional actions after .hg/hgrc is loaded.
706
708
707 This function is called during repository loading immediately after
709 This function is called during repository loading immediately after
708 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
710 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
709
711
710 The function can be used to validate configs, automatically add
712 The function can be used to validate configs, automatically add
711 options (including extensions) based on requirements, etc.
713 options (including extensions) based on requirements, etc.
712 """
714 """
713
715
714 # Map of requirements to list of extensions to load automatically when
716 # Map of requirements to list of extensions to load automatically when
715 # requirement is present.
717 # requirement is present.
716 autoextensions = {
718 autoextensions = {
717 b'git': [b'git'],
719 b'git': [b'git'],
718 b'largefiles': [b'largefiles'],
720 b'largefiles': [b'largefiles'],
719 b'lfs': [b'lfs'],
721 b'lfs': [b'lfs'],
720 }
722 }
721
723
722 for requirement, names in sorted(autoextensions.items()):
724 for requirement, names in sorted(autoextensions.items()):
723 if requirement not in requirements:
725 if requirement not in requirements:
724 continue
726 continue
725
727
726 for name in names:
728 for name in names:
727 if not ui.hasconfig(b'extensions', name):
729 if not ui.hasconfig(b'extensions', name):
728 ui.setconfig(b'extensions', name, b'', source=b'autoload')
730 ui.setconfig(b'extensions', name, b'', source=b'autoload')
729
731
730
732
731 def gathersupportedrequirements(ui):
733 def gathersupportedrequirements(ui):
732 """Determine the complete set of recognized requirements."""
734 """Determine the complete set of recognized requirements."""
733 # Start with all requirements supported by this file.
735 # Start with all requirements supported by this file.
734 supported = set(localrepository._basesupported)
736 supported = set(localrepository._basesupported)
735
737
736 # Execute ``featuresetupfuncs`` entries if they belong to an extension
738 # Execute ``featuresetupfuncs`` entries if they belong to an extension
737 # relevant to this ui instance.
739 # relevant to this ui instance.
738 modules = {m.__name__ for n, m in extensions.extensions(ui)}
740 modules = {m.__name__ for n, m in extensions.extensions(ui)}
739
741
740 for fn in featuresetupfuncs:
742 for fn in featuresetupfuncs:
741 if fn.__module__ in modules:
743 if fn.__module__ in modules:
742 fn(ui, supported)
744 fn(ui, supported)
743
745
744 # Add derived requirements from registered compression engines.
746 # Add derived requirements from registered compression engines.
745 for name in util.compengines:
747 for name in util.compengines:
746 engine = util.compengines[name]
748 engine = util.compengines[name]
747 if engine.available() and engine.revlogheader():
749 if engine.available() and engine.revlogheader():
748 supported.add(b'exp-compression-%s' % name)
750 supported.add(b'exp-compression-%s' % name)
749 if engine.name() == b'zstd':
751 if engine.name() == b'zstd':
750 supported.add(b'revlog-compression-zstd')
752 supported.add(b'revlog-compression-zstd')
751
753
752 return supported
754 return supported
753
755
754
756
755 def ensurerequirementsrecognized(requirements, supported):
757 def ensurerequirementsrecognized(requirements, supported):
756 """Validate that a set of local requirements is recognized.
758 """Validate that a set of local requirements is recognized.
757
759
758 Receives a set of requirements. Raises an ``error.RepoError`` if there
760 Receives a set of requirements. Raises an ``error.RepoError`` if there
759 exists any requirement in that set that currently loaded code doesn't
761 exists any requirement in that set that currently loaded code doesn't
760 recognize.
762 recognize.
761
763
762 Returns a set of supported requirements.
764 Returns a set of supported requirements.
763 """
765 """
764 missing = set()
766 missing = set()
765
767
766 for requirement in requirements:
768 for requirement in requirements:
767 if requirement in supported:
769 if requirement in supported:
768 continue
770 continue
769
771
770 if not requirement or not requirement[0:1].isalnum():
772 if not requirement or not requirement[0:1].isalnum():
771 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
773 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
772
774
773 missing.add(requirement)
775 missing.add(requirement)
774
776
775 if missing:
777 if missing:
776 raise error.RequirementError(
778 raise error.RequirementError(
777 _(b'repository requires features unknown to this Mercurial: %s')
779 _(b'repository requires features unknown to this Mercurial: %s')
778 % b' '.join(sorted(missing)),
780 % b' '.join(sorted(missing)),
779 hint=_(
781 hint=_(
780 b'see https://mercurial-scm.org/wiki/MissingRequirement '
782 b'see https://mercurial-scm.org/wiki/MissingRequirement '
781 b'for more information'
783 b'for more information'
782 ),
784 ),
783 )
785 )
784
786
785
787
786 def ensurerequirementscompatible(ui, requirements):
788 def ensurerequirementscompatible(ui, requirements):
787 """Validates that a set of recognized requirements is mutually compatible.
789 """Validates that a set of recognized requirements is mutually compatible.
788
790
789 Some requirements may not be compatible with others or require
791 Some requirements may not be compatible with others or require
790 config options that aren't enabled. This function is called during
792 config options that aren't enabled. This function is called during
791 repository opening to ensure that the set of requirements needed
793 repository opening to ensure that the set of requirements needed
792 to open a repository is sane and compatible with config options.
794 to open a repository is sane and compatible with config options.
793
795
794 Extensions can monkeypatch this function to perform additional
796 Extensions can monkeypatch this function to perform additional
795 checking.
797 checking.
796
798
797 ``error.RepoError`` should be raised on failure.
799 ``error.RepoError`` should be raised on failure.
798 """
800 """
799 if (
801 if (
800 requirementsmod.SPARSE_REQUIREMENT in requirements
802 requirementsmod.SPARSE_REQUIREMENT in requirements
801 and not sparse.enabled
803 and not sparse.enabled
802 ):
804 ):
803 raise error.RepoError(
805 raise error.RepoError(
804 _(
806 _(
805 b'repository is using sparse feature but '
807 b'repository is using sparse feature but '
806 b'sparse is not enabled; enable the '
808 b'sparse is not enabled; enable the '
807 b'"sparse" extensions to access'
809 b'"sparse" extensions to access'
808 )
810 )
809 )
811 )
810
812
811
813
812 def makestore(requirements, path, vfstype):
814 def makestore(requirements, path, vfstype):
813 """Construct a storage object for a repository."""
815 """Construct a storage object for a repository."""
814 if b'store' in requirements:
816 if b'store' in requirements:
815 if b'fncache' in requirements:
817 if b'fncache' in requirements:
816 return storemod.fncachestore(
818 return storemod.fncachestore(
817 path, vfstype, b'dotencode' in requirements
819 path, vfstype, b'dotencode' in requirements
818 )
820 )
819
821
820 return storemod.encodedstore(path, vfstype)
822 return storemod.encodedstore(path, vfstype)
821
823
822 return storemod.basicstore(path, vfstype)
824 return storemod.basicstore(path, vfstype)
823
825
824
826
825 def resolvestorevfsoptions(ui, requirements, features):
827 def resolvestorevfsoptions(ui, requirements, features):
826 """Resolve the options to pass to the store vfs opener.
828 """Resolve the options to pass to the store vfs opener.
827
829
828 The returned dict is used to influence behavior of the storage layer.
830 The returned dict is used to influence behavior of the storage layer.
829 """
831 """
830 options = {}
832 options = {}
831
833
832 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
834 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
833 options[b'treemanifest'] = True
835 options[b'treemanifest'] = True
834
836
835 # experimental config: format.manifestcachesize
837 # experimental config: format.manifestcachesize
836 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
838 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
837 if manifestcachesize is not None:
839 if manifestcachesize is not None:
838 options[b'manifestcachesize'] = manifestcachesize
840 options[b'manifestcachesize'] = manifestcachesize
839
841
840 # In the absence of another requirement superseding a revlog-related
842 # In the absence of another requirement superseding a revlog-related
841 # requirement, we have to assume the repo is using revlog version 0.
843 # requirement, we have to assume the repo is using revlog version 0.
842 # This revlog format is super old and we don't bother trying to parse
844 # This revlog format is super old and we don't bother trying to parse
843 # opener options for it because those options wouldn't do anything
845 # opener options for it because those options wouldn't do anything
844 # meaningful on such old repos.
846 # meaningful on such old repos.
845 if (
847 if (
846 b'revlogv1' in requirements
848 b'revlogv1' in requirements
847 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
849 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
848 ):
850 ):
849 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
851 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
850 else: # explicitly mark repo as using revlogv0
852 else: # explicitly mark repo as using revlogv0
851 options[b'revlogv0'] = True
853 options[b'revlogv0'] = True
852
854
853 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
855 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
854 options[b'copies-storage'] = b'changeset-sidedata'
856 options[b'copies-storage'] = b'changeset-sidedata'
855 else:
857 else:
856 writecopiesto = ui.config(b'experimental', b'copies.write-to')
858 writecopiesto = ui.config(b'experimental', b'copies.write-to')
857 copiesextramode = (b'changeset-only', b'compatibility')
859 copiesextramode = (b'changeset-only', b'compatibility')
858 if writecopiesto in copiesextramode:
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
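
    # Illustrative sketch (not part of the original source): a requirement
    # such as b'revlog-compression-zstd' splits, with maxsplit=2, into
    # [b'revlog', b'compression', b'zstd'], so the engine name is:
    #
    #   >>> b'revlog-compression-zstd'.split(b'-', 2)[2]
    #   b'zstd'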

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])
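
    # Illustrative configuration (assumed, not from this file): the levels
    # validated above come from the user's configuration, e.g. in an hgrc:
    #
    #   [storage]
    #   revlog.zlib.level = 6
    #   revlog.zstd.level = 3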

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
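
# Hypothetical sketch (not part of the original source): because the list
# above stores ``lambda: makemain`` rather than ``makemain`` itself, an
# extension can rebind the module-level factory and still take effect:
#
#   class extendedrepo(localrepo.localrepository):
#       pass
#
#   def mymakemain(**kwargs):
#       return extendedrepo
#
#   localrepo.makemain = mymakemain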


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
                elif repo._currentlock(repo._wlockref) is None:
                    # rest of vfs files are covered by 'wlock'
                    #
                    # exclude special files
                    for prefix in self._wlockfreeprefix:
                        if path.startswith(prefix):
                            return
                    repo.ui.develwarn(
                        b'write with no wlock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview` "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
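
    # Illustrative usage (assumed, not part of this file): requesting the
    # "served" view, regardless of the filter currently applied to ``repo``:
    #
    #   served = repo.filtered(b'served')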

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) an outside transaction updates the changelog to content B
        # 3) an outside transaction updates the bookmark file, referring to
        #    content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # Step (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
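
    # Illustrative usage (assumed, not part of this file): the returned
    # matcher can be called with a repo-relative path:
    #
    #   m = repo.narrowmatch()
    #   if m(b'dir/file.txt'):
    #       ...  # the path falls inside the narrowspec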

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents; however, only
        # do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognize right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
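
    # Illustrative usage (assumed, not part of this file): the lookups
    # handled above include, among others:
    #
    #   repo[None]    # working directory context
    #   repo[b'.']    # first parent of the working directory
    #   repo[0]       # changeset by revision number
    #   repo[b'tip']  # changeset by symbolic name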

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
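
    # Illustrative usage (assumed, not part of this file): %-formatting
    # escapes values into the revset, e.g. a list of binary nodes with %ln:
    #
    #   for rev in repo.revs(b'ancestors(%ln)', nodes):
    #       ...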

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
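
    # Hedged sketch (illustrative only): ``set()`` is the changectx-yielding
    # counterpart of ``revs()``:
    #
    #     for ctx in repo.set(b'branch(%s)', b'default'):
    #         print(ctx.hex())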

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
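
    # Hedged sketch (illustrative only): ``localalias`` pins an alias
    # definition regardless of the user's configuration, e.g.:
    #
    #     revs = repo.anyrevs(
    #         [b'mine()'], user=True, localalias={b'mine': b'user("alice")'}
    #     )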

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
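
    # Hedged sketch (the hook name here is hypothetical): an extension that
    # registered a ``myext-precommit`` hook could fire it with:
    #
    #     repo.hook(b'myext-precommit', throw=True, source=b'commit')
    #
    # With ``throw=True`` a failing hook aborts instead of merely warning.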

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
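
    # Hedged sketch (illustrative tag name): distinguishing tag scopes:
    #
    #     if repo.tagtype(b'v1.0') == b'global':
    #         pass  # defined in .hgtags, shared on push/pull
    #     elif repo.tagtype(b'v1.0') == b'local':
    #         pass  # defined only in .hg/localtags
    #     else:
    #         pass  # None: no such tag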

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
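
    # Hedged sketch (illustrative file names): ``flags`` is a bytestring in
    # which b'l' requests a symlink and b'x' the executable bit:
    #
    #     repo.wwrite(b'run.sh', b'#!/bin/sh\necho hi\n', b'x')
    #     repo.wwrite(b'alias', b'run.sh', b'l')  # symlink pointing at run.sh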

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
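        #
        # Hedged parsing sketch (illustrative, not part of the module): a
        # txnclose hook could consume that file roughly like this:
        #
        #     with repo.vfs(b'changes/tags.changes') as fp:
        #         for line in fp:
        #             action, node, name = line.rstrip(b'\n').split(b' ', 2)
        #             # dispatch on b'-R', b'+A', b'-M', b'+M'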
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
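
    # Illustrative note: ``journal.desc`` stores the pre-transaction changelog
    # length and the transaction name on two lines, which ``_rollback()``
    # later parses back; e.g. the bytes b"42\ncommit\n" mean the repository
    # had 42 revisions before a 'commit' transaction.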

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the cache-updating callback

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater
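
    # Hedged standalone sketch of the pattern used above: holding a weakref
    # instead of the repository itself keeps callbacks stored on the
    # transaction from pinning the repo in a reference cycle:
    #
    #     import weakref
    #     reporef = weakref.ref(repo)
    #     ...
    #     repo = reporef()  # None once the repository has been GC'd
    #     if repo is not None:
    #         repo.updatecaches()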
2510
2512
2511 @unfilteredmethod
2513 @unfilteredmethod
2512 def updatecaches(self, tr=None, full=False):
2514 def updatecaches(self, tr=None, full=False):
2513 """warm appropriate caches
2515 """warm appropriate caches
2514
2516
2515 If this function is called after a transaction closed. The transaction
2517 If this function is called after a transaction closed. The transaction
2516 will be available in the 'tr' argument. This can be used to selectively
2518 will be available in the 'tr' argument. This can be used to selectively
2517 update caches relevant to the changes in that transaction.
2519 update caches relevant to the changes in that transaction.
2518
2520
2519 If 'full' is set, make sure all caches the function knows about have
2521 If 'full' is set, make sure all caches the function knows about have
2520 up-to-date data. Even the ones usually loaded more lazily.
2522 up-to-date data. Even the ones usually loaded more lazily.
2521 """
2523 """
2522 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2524 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2523 # During strip, many caches are invalid but
2525 # During strip, many caches are invalid but
2524 # later call to `destroyed` will refresh them.
2526 # later call to `destroyed` will refresh them.
2525 return
2527 return
2526
2528
2527 if tr is None or tr.changes[b'origrepolen'] < len(self):
2529 if tr is None or tr.changes[b'origrepolen'] < len(self):
2528 # accessing the 'ser ved' branchmap should refresh all the others,
2530 # accessing the 'ser ved' branchmap should refresh all the others,
2529 self.ui.debug(b'updating the branch cache\n')
2531 self.ui.debug(b'updating the branch cache\n')
2530 self.filtered(b'served').branchmap()
2532 self.filtered(b'served').branchmap()
2531 self.filtered(b'served.hidden').branchmap()
2533 self.filtered(b'served.hidden').branchmap()
2532
2534
2533 if full:
2535 if full:
2534 unfi = self.unfiltered()
2536 unfi = self.unfiltered()
2535
2537
2536 self.changelog.update_caches(transaction=tr)
2538 self.changelog.update_caches(transaction=tr)
2537 self.manifestlog.update_caches(transaction=tr)
2539 self.manifestlog.update_caches(transaction=tr)
2538
2540
2539 rbc = unfi.revbranchcache()
2541 rbc = unfi.revbranchcache()
2540 for r in unfi.changelog:
2542 for r in unfi.changelog:
2541 rbc.branchinfo(r)
2543 rbc.branchinfo(r)
2542 rbc.write()
2544 rbc.write()
2543
2545
2544 # ensure the working copy parents are in the manifestfulltextcache
2546 # ensure the working copy parents are in the manifestfulltextcache
2545 for ctx in self[b'.'].parents():
2547 for ctx in self[b'.'].parents():
2546 ctx.manifest() # accessing the manifest is enough
2548 ctx.manifest() # accessing the manifest is enough
2547
2549
2548 # accessing fnode cache warms the cache
2550 # accessing fnode cache warms the cache
2549 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2551 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2550 # accessing tags warm the cache
2552 # accessing tags warm the cache
2551 self.tags()
2553 self.tags()
2552 self.filtered(b'served').tags()
2554 self.filtered(b'served').tags()
2553
2555
2554 # The `full` arg is documented as updating even the lazily-loaded
2556 # The `full` arg is documented as updating even the lazily-loaded
2555 # caches immediately, so we're forcing a write to cause these caches
2557 # caches immediately, so we're forcing a write to cause these caches
2556 # to be warmed up even if they haven't explicitly been requested
2558 # to be warmed up even if they haven't explicitly been requested
2557 # yet (if they've never been used by hg, they won't ever have been
2559 # yet (if they've never been used by hg, they won't ever have been
2558 # written, even if they're a subset of another kind of cache that
2560 # written, even if they're a subset of another kind of cache that
2559 # *has* been used).
2561 # *has* been used).
2560 for filt in repoview.filtertable.keys():
2562 for filt in repoview.filtertable.keys():
2561 filtered = self.filtered(filt)
2563 filtered = self.filtered(filt)
2562 filtered.branchmap().write(filtered)
2564 filtered.branchmap().write(filtered)
2563
2565
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

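    # A minimal sketch (assumed extension code) of hooking invalidateall so an
    # extension-private cache is dropped alongside the built-in ones, using
    # extensions.wrapfunction:
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _invalidateall(orig, repo):
    #         repo._mycache = {}  # hypothetical extension-private cache
    #         return orig(repo)
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(
    #             localrepo.localrepository, 'invalidateall', _invalidateall
    #         )
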
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

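    # Usage sketch (hypothetical caller, mirroring commithook in commit()
    # below): defer work until every lock is released; the callback receives
    # a boolean success flag, or True immediately when no lock is held.
    #
    #     def _announce(success):
    #         if success:
    #             repo.ui.note(b'locks released, running deferred work\n')
    #
    #     repo._afterlock(_announce)
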
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a deadlock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

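    # Lock-ordering sketch (assumed caller code): when both locks are needed,
    # take wlock before lock, preferably as context managers so release is
    # guaranteed even on error, as commit() does below:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the working copy and then the store
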
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # the temporary commit got stripped before the hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

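    # Usage sketch (assumed embedding code): committing programmatically with
    # a narrowed matcher; commit() returns the new node, or None if there was
    # nothing to commit.
    #
    #     from mercurial import match as matchmod
    #
    #     m = matchmod.exact([b'src/foo.py'])  # hypothetical path
    #     node = repo.commit(
    #         text=b'fix foo', user=b'me <me@example.com>', match=m
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
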
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

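    # Sketch (assumed extension code) of a post-dirstate-status callback in
    # the style described by addpostdsstatus() above:
    #
    #     def poststatus(wctx, status):
    #         # runs under wlock after status fixups; reread the dirstate via
    #         # wctx.repo().dirstate rather than using a cached copy
    #         wctx.repo().ui.note(b'%d modified\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(poststatus)
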
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

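    # between() samples first-parent ancestors at exponentially growing gaps:
    # walking from 'top' toward 'bottom', it records the nodes at steps
    # 1, 2, 4, 8, ..., which the legacy wire-protocol discovery used to
    # bisect history. For example, a pair eleven first-parent steps apart
    # yields the ancestors at steps 1, 2, 4, and 8.
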
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions, called with a pushop
        (which carries repo, remote, and outgoing attributes), that run
        before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

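    # Sketch (assumed caller code): the pushkey protocol moves simple
    # key/value namespaces, such as bookmarks, between repositories:
    #
    #     repo.listkeys(b'bookmarks')  # -> {bookmark name: hex node, ...}
    #     repo.pushkey(b'bookmarks', b'feature', b'', hex(newnode))
    #
    # where passing b'' as the old value creates the key; both calls fire
    # the corresponding pre/post hooks defined above.
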
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


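# Example (illustrative): undoname maps a journal file to its undo
# counterpart, e.g. b'.hg/store/journal.phaseroots' becomes
# b'.hg/store/undo.phaseroots'.

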
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    return requirements


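# Sketch (assumed extension code) of wrapping newreporequirements to add a
# custom requirement to newly created repositories:
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myextension')  # hypothetical requirement name
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )

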
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because their
    dependent requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


3390 def createrepository(ui, path, createopts=None):
3392 def createrepository(ui, path, createopts=None):
3391 """Create a new repository in a vfs.
3393 """Create a new repository in a vfs.
3392
3394
3393 ``path`` path to the new repo's working directory.
3395 ``path`` path to the new repo's working directory.
3394 ``createopts`` options for the new repository.
3396 ``createopts`` options for the new repository.
3395
3397
3396 The following keys for ``createopts`` are recognized:
3398 The following keys for ``createopts`` are recognized:
3397
3399
3398 backend
3400 backend
3399 The storage backend to use.
3401 The storage backend to use.
3400 lfs
3402 lfs
3401 Repository will be created with ``lfs`` requirement. The lfs extension
3403 Repository will be created with ``lfs`` requirement. The lfs extension
3402 will automatically be loaded when the repository is accessed.
3404 will automatically be loaded when the repository is accessed.
3403 narrowfiles
3405 narrowfiles
3404 Set up repository to support narrow file storage.
3406 Set up repository to support narrow file storage.
3405 sharedrepo
3407 sharedrepo
3406 Repository object from which storage should be shared.
3408 Repository object from which storage should be shared.
3407 sharedrelative
3409 sharedrelative
3408 Boolean indicating if the path to the shared repo should be
3410 Boolean indicating if the path to the shared repo should be
3409 stored as relative. By default, the pointer to the "parent" repo
3411 stored as relative. By default, the pointer to the "parent" repo
3410 is stored as an absolute path.
3412 is stored as an absolute path.
3411 shareditems
3413 shareditems
3412 Set of items to share to the new repository (in addition to storage).
3414 Set of items to share to the new repository (in addition to storage).
3413 shallowfilestore
3415 shallowfilestore
3414 Indicates that storage for files should be shallow (not all ancestor
3416 Indicates that storage for files should be shallow (not all ancestor
3415 revisions are known).
3417 revisions are known).
3416 """
3418 """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so that very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )
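        # Explanatory note (editorial addition, not from the original
        # source): the first four bytes of a revlog index pack the format
        # flags and version into one big-endian uint32, so b'\0\0\0\2'
        # declares flags=0 and version=2, a combination that
        # pre-requirements clients cannot parse.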

    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

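# A minimal usage sketch (editorial addition; ``myui`` and the path are
# hypothetical). In practice this function is normally reached indirectly,
# e.g. via hg.repository(ui, path, create=True):
#
#     from mercurial import localrepo, ui as uimod
#
#     myui = uimod.ui.load()
#     localrepo.createrepository(
#         myui, b'/tmp/newrepo', createopts={b'lfs': True}
#     )
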
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
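

# A short sketch of the effect (editorial addition; ``repo`` is any live
# repository instance): after poisoning, close() remains callable but every
# other attribute access raises a ProgrammingError.
#
#     poisonrepository(repo)
#     repo.close()       # still permitted
#     repo.changelog     # raises error.ProgrammingError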