localrepo: disallow share if there is a version mismatch by default...
Pulkit Goyal
r47049:8788981c default
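This changes the default behavior for an old-style share whose source repository
has since been upgraded to the experimental share-safe format: instead of the
optional warning (experimental.sharesafe-warn-outdated-shares), opening such a
share now aborts unless experimental.sharesafe-auto-upgrade-shares is enabled.
A hypothetical transcript of the new default behavior (repository names invented
for illustration):

  $ hg -R old-share status    # share created before the source moved to share-safe
  abort: version mismatch: source uses share-safe functionality while the current share does not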
@@ -1,3651 +1,3649 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the require file present at root of this vfs
    and return a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
    # is not present, refer checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            if ui.configbool(
                b'experimental', b'sharesafe-auto-downgrade-shares'
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                )
            else:
                raise error.Abort(
                    _(
                        b"share source does not support exp-sharesafe requirement"
                    )
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            if ui.configbool(b'experimental', b'sharesafe-auto-upgrade-shares'):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                )
-            elif ui.configbool(
-                b'experimental', b'sharesafe-warn-outdated-shares'
-            ):
-                ui.warn(
-                    _(
-                        b'warning: source repository supports share-safe functionality.'
-                        b' Reshare to upgrade.\n'
-                    )
-                )
+            else:
+                raise error.Abort(
+                    _(
+                        b'version mismatch: source uses share-safe'
+                        b' functionality while the current share does not'
+                    )
+                )

621 # The .hg/hgrc file may load extensions or contain config options
619 # The .hg/hgrc file may load extensions or contain config options
622 # that influence repository construction. Attempt to load it and
620 # that influence repository construction. Attempt to load it and
623 # process any new extensions that it may have pulled in.
621 # process any new extensions that it may have pulled in.
624 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
622 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
625 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
623 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
626 extensions.loadall(ui)
624 extensions.loadall(ui)
627 extensions.populateui(ui)
625 extensions.populateui(ui)
628
626
629 # Set of module names of extensions loaded for this repository.
627 # Set of module names of extensions loaded for this repository.
630 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
628 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
631
629
632 supportedrequirements = gathersupportedrequirements(ui)
630 supportedrequirements = gathersupportedrequirements(ui)
633
631
634 # We first validate the requirements are known.
632 # We first validate the requirements are known.
635 ensurerequirementsrecognized(requirements, supportedrequirements)
633 ensurerequirementsrecognized(requirements, supportedrequirements)
636
634
637 # Then we validate that the known set is reasonable to use together.
635 # Then we validate that the known set is reasonable to use together.
638 ensurerequirementscompatible(ui, requirements)
636 ensurerequirementscompatible(ui, requirements)
639
637
640 # TODO there are unhandled edge cases related to opening repositories with
638 # TODO there are unhandled edge cases related to opening repositories with
641 # shared storage. If storage is shared, we should also test for requirements
639 # shared storage. If storage is shared, we should also test for requirements
642 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
640 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
643 # that repo, as that repo may load extensions needed to open it. This is a
641 # that repo, as that repo may load extensions needed to open it. This is a
644 # bit complicated because we don't want the other hgrc to overwrite settings
642 # bit complicated because we don't want the other hgrc to overwrite settings
645 # in this hgrc.
643 # in this hgrc.
646 #
644 #
647 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
645 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
648 # file when sharing repos. But if a requirement is added after the share is
646 # file when sharing repos. But if a requirement is added after the share is
649 # performed, thereby introducing a new requirement for the opener, we may
647 # performed, thereby introducing a new requirement for the opener, we may
650 # will not see that and could encounter a run-time error interacting with
648 # will not see that and could encounter a run-time error interacting with
651 # that shared store since it has an unknown-to-us requirement.
649 # that shared store since it has an unknown-to-us requirement.
652
650
653 # At this point, we know we should be capable of opening the repository.
651 # At this point, we know we should be capable of opening the repository.
654 # Now get on with doing that.
652 # Now get on with doing that.
655
653
656 features = set()
654 features = set()
657
655
658 # The "store" part of the repository holds versioned data. How it is
656 # The "store" part of the repository holds versioned data. How it is
659 # accessed is determined by various requirements. If `shared` or
657 # accessed is determined by various requirements. If `shared` or
660 # `relshared` requirements are present, this indicates current repository
658 # `relshared` requirements are present, this indicates current repository
661 # is a share and store exists in path mentioned in `.hg/sharedpath`
659 # is a share and store exists in path mentioned in `.hg/sharedpath`
662 if shared:
660 if shared:
663 storebasepath = sharedvfs.base
661 storebasepath = sharedvfs.base
664 cachepath = sharedvfs.join(b'cache')
662 cachepath = sharedvfs.join(b'cache')
665 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
663 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
666 else:
664 else:
667 storebasepath = hgvfs.base
665 storebasepath = hgvfs.base
668 cachepath = hgvfs.join(b'cache')
666 cachepath = hgvfs.join(b'cache')
669 wcachepath = hgvfs.join(b'wcache')
667 wcachepath = hgvfs.join(b'wcache')
670
668
671 # The store has changed over time and the exact layout is dictated by
669 # The store has changed over time and the exact layout is dictated by
672 # requirements. The store interface abstracts differences across all
670 # requirements. The store interface abstracts differences across all
673 # of them.
671 # of them.
674 store = makestore(
672 store = makestore(
675 requirements,
673 requirements,
676 storebasepath,
674 storebasepath,
677 lambda base: vfsmod.vfs(base, cacheaudited=True),
675 lambda base: vfsmod.vfs(base, cacheaudited=True),
678 )
676 )
679 hgvfs.createmode = store.createmode
677 hgvfs.createmode = store.createmode
680
678
681 storevfs = store.vfs
679 storevfs = store.vfs
682 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
680 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
683
681
684 # The cache vfs is used to manage cache files.
682 # The cache vfs is used to manage cache files.
685 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
683 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
686 cachevfs.createmode = store.createmode
684 cachevfs.createmode = store.createmode
687 # The cache vfs is used to manage cache files related to the working copy
685 # The cache vfs is used to manage cache files related to the working copy
688 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
686 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
689 wcachevfs.createmode = store.createmode
687 wcachevfs.createmode = store.createmode
690
688
691 # Now resolve the type for the repository object. We do this by repeatedly
689 # Now resolve the type for the repository object. We do this by repeatedly
692 # calling a factory function to produces types for specific aspects of the
690 # calling a factory function to produces types for specific aspects of the
693 # repo's operation. The aggregate returned types are used as base classes
691 # repo's operation. The aggregate returned types are used as base classes
694 # for a dynamically-derived type, which will represent our new repository.
692 # for a dynamically-derived type, which will represent our new repository.
695
693
696 bases = []
694 bases = []
697 extrastate = {}
695 extrastate = {}
698
696
699 for iface, fn in REPO_INTERFACES:
697 for iface, fn in REPO_INTERFACES:
700 # We pass all potentially useful state to give extensions tons of
698 # We pass all potentially useful state to give extensions tons of
701 # flexibility.
699 # flexibility.
702 typ = fn()(
700 typ = fn()(
703 ui=ui,
701 ui=ui,
704 intents=intents,
702 intents=intents,
705 requirements=requirements,
703 requirements=requirements,
706 features=features,
704 features=features,
707 wdirvfs=wdirvfs,
705 wdirvfs=wdirvfs,
708 hgvfs=hgvfs,
706 hgvfs=hgvfs,
709 store=store,
707 store=store,
710 storevfs=storevfs,
708 storevfs=storevfs,
711 storeoptions=storevfs.options,
709 storeoptions=storevfs.options,
712 cachevfs=cachevfs,
710 cachevfs=cachevfs,
713 wcachevfs=wcachevfs,
711 wcachevfs=wcachevfs,
714 extensionmodulenames=extensionmodulenames,
712 extensionmodulenames=extensionmodulenames,
715 extrastate=extrastate,
713 extrastate=extrastate,
716 baseclasses=bases,
714 baseclasses=bases,
717 )
715 )
718
716
719 if not isinstance(typ, type):
717 if not isinstance(typ, type):
720 raise error.ProgrammingError(
718 raise error.ProgrammingError(
721 b'unable to construct type for %s' % iface
719 b'unable to construct type for %s' % iface
722 )
720 )
723
721
724 bases.append(typ)
722 bases.append(typ)
725
723
726 # type() allows you to use characters in type names that wouldn't be
724 # type() allows you to use characters in type names that wouldn't be
727 # recognized as Python symbols in source code. We abuse that to add
725 # recognized as Python symbols in source code. We abuse that to add
728 # rich information about our constructed repo.
726 # rich information about our constructed repo.
729 name = pycompat.sysstr(
727 name = pycompat.sysstr(
730 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
728 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
731 )
729 )
732
730
733 cls = type(name, tuple(bases), {})
731 cls = type(name, tuple(bases), {})
734
732
735 return cls(
733 return cls(
736 baseui=baseui,
734 baseui=baseui,
737 ui=ui,
735 ui=ui,
738 origroot=path,
736 origroot=path,
739 wdirvfs=wdirvfs,
737 wdirvfs=wdirvfs,
740 hgvfs=hgvfs,
738 hgvfs=hgvfs,
741 requirements=requirements,
739 requirements=requirements,
742 supportedrequirements=supportedrequirements,
740 supportedrequirements=supportedrequirements,
743 sharedpath=storebasepath,
741 sharedpath=storebasepath,
744 store=store,
742 store=store,
745 cachevfs=cachevfs,
743 cachevfs=cachevfs,
746 wcachevfs=wcachevfs,
744 wcachevfs=wcachevfs,
747 features=features,
745 features=features,
748 intents=intents,
746 intents=intents,
749 )
747 )
750
748
751
749
752 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
750 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
753 """Load hgrc files/content into a ui instance.
751 """Load hgrc files/content into a ui instance.
754
752
755 This is called during repository opening to load any additional
753 This is called during repository opening to load any additional
756 config files or settings relevant to the current repository.
754 config files or settings relevant to the current repository.
757
755
758 Returns a bool indicating whether any additional configs were loaded.
756 Returns a bool indicating whether any additional configs were loaded.
759
757
760 Extensions should monkeypatch this function to modify how per-repo
758 Extensions should monkeypatch this function to modify how per-repo
761 configs are loaded. For example, an extension may wish to pull in
759 configs are loaded. For example, an extension may wish to pull in
762 configs from alternate files or sources.
760 configs from alternate files or sources.
763
761
764 sharedvfs is vfs object pointing to source repo if the current one is a
762 sharedvfs is vfs object pointing to source repo if the current one is a
765 shared one
763 shared one
766 """
764 """
767 if not rcutil.use_repo_hgrc():
765 if not rcutil.use_repo_hgrc():
768 return False
766 return False
769
767
770 ret = False
768 ret = False
771 # first load config from shared source if we has to
769 # first load config from shared source if we has to
772 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
770 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
773 try:
771 try:
774 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
772 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
775 ret = True
773 ret = True
776 except IOError:
774 except IOError:
777 pass
775 pass
778
776
779 try:
777 try:
780 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
778 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
781 ret = True
779 ret = True
782 except IOError:
780 except IOError:
783 pass
781 pass
784
782
785 try:
783 try:
786 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
784 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
787 ret = True
785 ret = True
788 except IOError:
786 except IOError:
789 pass
787 pass
790
788
791 return ret
789 return ret
792
790
793
791
794 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
792 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
795 """Perform additional actions after .hg/hgrc is loaded.
793 """Perform additional actions after .hg/hgrc is loaded.
796
794
797 This function is called during repository loading immediately after
795 This function is called during repository loading immediately after
798 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
796 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
799
797
800 The function can be used to validate configs, automatically add
798 The function can be used to validate configs, automatically add
801 options (including extensions) based on requirements, etc.
799 options (including extensions) based on requirements, etc.
802 """
800 """
803
801
804 # Map of requirements to list of extensions to load automatically when
802 # Map of requirements to list of extensions to load automatically when
805 # requirement is present.
803 # requirement is present.
806 autoextensions = {
804 autoextensions = {
807 b'git': [b'git'],
805 b'git': [b'git'],
808 b'largefiles': [b'largefiles'],
806 b'largefiles': [b'largefiles'],
809 b'lfs': [b'lfs'],
807 b'lfs': [b'lfs'],
810 }
808 }
811
809
812 for requirement, names in sorted(autoextensions.items()):
810 for requirement, names in sorted(autoextensions.items()):
813 if requirement not in requirements:
811 if requirement not in requirements:
814 continue
812 continue
815
813
816 for name in names:
814 for name in names:
817 if not ui.hasconfig(b'extensions', name):
815 if not ui.hasconfig(b'extensions', name):
818 ui.setconfig(b'extensions', name, b'', source=b'autoload')
816 ui.setconfig(b'extensions', name, b'', source=b'autoload')
819
817
820
818
821 def gathersupportedrequirements(ui):
819 def gathersupportedrequirements(ui):
822 """Determine the complete set of recognized requirements."""
820 """Determine the complete set of recognized requirements."""
823 # Start with all requirements supported by this file.
821 # Start with all requirements supported by this file.
824 supported = set(localrepository._basesupported)
822 supported = set(localrepository._basesupported)
825
823
826 # Execute ``featuresetupfuncs`` entries if they belong to an extension
824 # Execute ``featuresetupfuncs`` entries if they belong to an extension
827 # relevant to this ui instance.
825 # relevant to this ui instance.
828 modules = {m.__name__ for n, m in extensions.extensions(ui)}
826 modules = {m.__name__ for n, m in extensions.extensions(ui)}
829
827
830 for fn in featuresetupfuncs:
828 for fn in featuresetupfuncs:
831 if fn.__module__ in modules:
829 if fn.__module__ in modules:
832 fn(ui, supported)
830 fn(ui, supported)
833
831
834 # Add derived requirements from registered compression engines.
832 # Add derived requirements from registered compression engines.
835 for name in util.compengines:
833 for name in util.compengines:
836 engine = util.compengines[name]
834 engine = util.compengines[name]
837 if engine.available() and engine.revlogheader():
835 if engine.available() and engine.revlogheader():
838 supported.add(b'exp-compression-%s' % name)
836 supported.add(b'exp-compression-%s' % name)
839 if engine.name() == b'zstd':
837 if engine.name() == b'zstd':
840 supported.add(b'revlog-compression-zstd')
838 supported.add(b'revlog-compression-zstd')
841
839
842 return supported
840 return supported
843
841
844
842
845 def ensurerequirementsrecognized(requirements, supported):
843 def ensurerequirementsrecognized(requirements, supported):
846 """Validate that a set of local requirements is recognized.
844 """Validate that a set of local requirements is recognized.
847
845
848 Receives a set of requirements. Raises an ``error.RepoError`` if there
846 Receives a set of requirements. Raises an ``error.RepoError`` if there
849 exists any requirement in that set that currently loaded code doesn't
847 exists any requirement in that set that currently loaded code doesn't
850 recognize.
848 recognize.
851
849
852 Returns a set of supported requirements.
850 Returns a set of supported requirements.
853 """
851 """
854 missing = set()
852 missing = set()
855
853
856 for requirement in requirements:
854 for requirement in requirements:
857 if requirement in supported:
855 if requirement in supported:
858 continue
856 continue
859
857
860 if not requirement or not requirement[0:1].isalnum():
858 if not requirement or not requirement[0:1].isalnum():
861 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
859 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
862
860
863 missing.add(requirement)
861 missing.add(requirement)
864
862
865 if missing:
863 if missing:
866 raise error.RequirementError(
864 raise error.RequirementError(
867 _(b'repository requires features unknown to this Mercurial: %s')
865 _(b'repository requires features unknown to this Mercurial: %s')
868 % b' '.join(sorted(missing)),
866 % b' '.join(sorted(missing)),
869 hint=_(
867 hint=_(
870 b'see https://mercurial-scm.org/wiki/MissingRequirement '
868 b'see https://mercurial-scm.org/wiki/MissingRequirement '
871 b'for more information'
869 b'for more information'
872 ),
870 ),
873 )
871 )
874
872
875
873
876 def ensurerequirementscompatible(ui, requirements):
874 def ensurerequirementscompatible(ui, requirements):
877 """Validates that a set of recognized requirements is mutually compatible.
875 """Validates that a set of recognized requirements is mutually compatible.
878
876
879 Some requirements may not be compatible with others or require
877 Some requirements may not be compatible with others or require
880 config options that aren't enabled. This function is called during
878 config options that aren't enabled. This function is called during
881 repository opening to ensure that the set of requirements needed
879 repository opening to ensure that the set of requirements needed
882 to open a repository is sane and compatible with config options.
880 to open a repository is sane and compatible with config options.
883
881
884 Extensions can monkeypatch this function to perform additional
882 Extensions can monkeypatch this function to perform additional
885 checking.
883 checking.
886
884
887 ``error.RepoError`` should be raised on failure.
885 ``error.RepoError`` should be raised on failure.
888 """
886 """
889 if (
887 if (
890 requirementsmod.SPARSE_REQUIREMENT in requirements
888 requirementsmod.SPARSE_REQUIREMENT in requirements
891 and not sparse.enabled
889 and not sparse.enabled
892 ):
890 ):
893 raise error.RepoError(
891 raise error.RepoError(
894 _(
892 _(
895 b'repository is using sparse feature but '
893 b'repository is using sparse feature but '
896 b'sparse is not enabled; enable the '
894 b'sparse is not enabled; enable the '
897 b'"sparse" extensions to access'
895 b'"sparse" extensions to access'
898 )
896 )
899 )
897 )
900
898
901
899
902 def makestore(requirements, path, vfstype):
900 def makestore(requirements, path, vfstype):
903 """Construct a storage object for a repository."""
901 """Construct a storage object for a repository."""
904 if b'store' in requirements:
902 if b'store' in requirements:
905 if b'fncache' in requirements:
903 if b'fncache' in requirements:
906 return storemod.fncachestore(
904 return storemod.fncachestore(
907 path, vfstype, b'dotencode' in requirements
905 path, vfstype, b'dotencode' in requirements
908 )
906 )
909
907
910 return storemod.encodedstore(path, vfstype)
908 return storemod.encodedstore(path, vfstype)
911
909
912 return storemod.basicstore(path, vfstype)
910 return storemod.basicstore(path, vfstype)
913
911
914
912
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


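# Illustrative call: for an empty requirement set (a revlogv0 repository)
# with ``experimental.copies.write-to=changeset-only`` configured, the
# branches above produce:
#
#   resolvestorevfsoptions(ui, set(), set())
#       -> {b'revlogv0': True, b'copies-storage': b'extra'}

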
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

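    # For example, a requirement of b'revlog-compression-zstd' yields
    # b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'; when several
    # matching requirements are present, the last one iterated wins.
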
    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


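# Illustrative behaviour of the slow-path handling above, assuming a build
# where revlog.HAS_FAST_PERSISTENT_NODEMAP is False and a repository with
# the persistent-nodemap requirement:
#
#   [storage]
#   revlog.persistent-nodemap.slow-path = warn
#       -> a "warning: accessing `persistent-nodemap` repository ..."
#          message is printed and the repository still opens
#   revlog.persistent-nodemap.slow-path = abort
#       -> error.Abort is raised and the repository does not open

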
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


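# Because the factories above are resolved through lambdas, an extension can
# wrap the module-level functions and have the wrapper picked up when the
# repository type is assembled. A minimal sketch (the ``wrapped`` function
# and the extension itself are hypothetical):
#
#   from mercurial import extensions, localrepo
#
#   def wrapped(orig, requirements, features, **kwargs):
#       cls = orig(requirements, features, **kwargs)
#       return cls  # or return a subclass adding extra behaviour
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'makefilestorage', wrapped)

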
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

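    # Per the note above, an extension tracking its own lock-free state file
    # could extend the set like so (the file name is hypothetical):
    #
    #   localrepository._wlockfreeprefix.add(b'myext.state')
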
    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` path to the storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

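    # A minimal sketch of obtaining a usable repository object, per the class
    # docstring (the path is illustrative):
    #
    #   from mercurial import hg, ui as uimod
    #
    #   repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #   repo[b'tip']  # -> changectx for the tip revision
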
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

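    # Walking example for the loop above: for subpath b'sub/dir/f' the
    # prefixes tried are b'sub/dir/f', then b'sub/dir', then b'sub'; the
    # first prefix found in ctx.substate decides, and the check recurses
    # into that subrepo for the remainder of the path.
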
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

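    # Usage sketch (assuming no experimental extra filter is configured):
    #
    #   served = repo.filtered(b'visible').filtered(b'served')
    #   served.filtername               # -> b'served' (filtering never nests)
    #   served.unfiltered().filtername  # -> None
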
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks to the
        # "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

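    # A sketch of how narrowmatch() composes matchers (the pattern is
    # illustrative):
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   repo.narrowmatch(m)  # files matching both the pattern and the
    #                        # narrowspec
    #   repo.narrowmatch()   # the bare narrowspec matcher; always() when
    #                        # the repo is not narrow
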
    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

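    # Illustrative contents (not from a real run): for a view with working
    # copy access whose first parent is revision 5 with node ``p1``, the
    # mapping built above contains at least:
    #
    #   {b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #    5: (5, p1), p1: (5, p1), b'.': (5, p1)}
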
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

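    # Hedged usage sketch for __getitem__ (the `repo` variable and node
    # values are illustrative, not defined in this module):
    #
    #     wctx = repo[None]        # working directory context
    #     ctx = repo[0]            # integer revision
    #     ctx = repo[b'tip']       # fast-pathed symbol
    #     ctx = repo[b'a' * 40]    # 40-byte hex nodeid
    #     ctxs = repo[0:3]         # slice, skipping filtered revisions
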
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

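    # Hedged sketch of the %-formatting mentioned in the docstring above
    # (see revsetlang.formatspec for the escape table); `repo` and `branch`
    # are illustrative:
    #
    #     for rev in repo.revs(b'branch(%s) and not public()', branch):
    #         ctx = repo[rev]
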
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

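    # Hedged sketch of the ``localalias`` override described above; the
    # alias name and definition are made up for illustration:
    #
    #     revs = repo.anyrevs(
    #         [b'heads(mine())'],
    #         user=True,
    #         localalias={b'mine': b'author(alice)'},
    #     )
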
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

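    # Hedged illustration of the pair returned above for a repository with
    # one global and one local tag (node values are placeholders):
    #
    #     tags = {b'v1.0': node1, b'wip': node2, b'tip': tipnode}
    #     tagtypes = {b'v1.0': b'global', b'wip': b'local'}
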
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

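    # Hedged usage sketch for branchtip (`repo` is illustrative):
    #
    #     node = repo.branchtip(b'default')                   # raises if unknown
    #     node = repo.branchtip(b'gone', ignoremissing=True)  # None if missing
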
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

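    # Hedged sketch: known() maps nodes to booleans, reporting False for
    # nodes that are absent or filtered (the node names are illustrative):
    #
    #     repo.known([existing_node, unknown_node])   # -> [True, False]
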
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

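    # Hedged sketch of the hgrc sections that feed _loadfilter above, using
    # the classic gzip example from the hgrc documentation; 'pipe:' is one
    # of the stock data filter prefixes:
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = pipe: gzip
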
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

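    # Hedged sketch of the flag handling above (`repo` and `data` are
    # illustrative):
    #
    #     repo.wwrite(b'script.sh', data, b'x')   # regular file, exec bit set
    #     repo.wwrite(b'link', b'target', b'l')   # written as a symlink
    #     repo.wwrite(b'plain.txt', data, b'')    # regular file, no exec bit
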
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

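    # Hedged usage sketch for transaction(): callers must already hold the
    # store lock, and the returned object works as a context manager that
    # aborts the transaction if an exception escapes:
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store; rolled back on error
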
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

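    # Hedged illustration of the journal.desc content written above (renamed
    # to undo.desc after the transaction): the first line is the
    # pre-transaction repository length, the second the description:
    #
    #     42
    #     commit
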
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

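    # Illustrative note (not part of the original module): _rollback above
    # parses b'undo.desc', which the transaction machinery writes as
    # newline-separated fields -- the old changelog length, a transaction
    # description, and an optional detail line. A minimal sketch of reading
    # it by hand, mirroring the code above:
    #
    #     args = repo.vfs.read(b'undo.desc').splitlines()
    #     oldlen, desc = int(args[0]), args[1]          # e.g. 42, b'commit'
    #     detail = args[2] if len(args) >= 3 else None  # optional third field
    #
    # so "rolled back to revision %d" reports oldlen - 1: the tip revision
    # from before the transaction being undone.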
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing the fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

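    # Illustrative sketch (an assumption, not code from this module): an
    # extension or maintenance task can force every cache this method knows
    # about to be rewritten, which is essentially what 'hg debugupdatecaches'
    # does:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)  # branchmaps, rev-branch cache,
    #                                       # tags, fnode and manifest caches
    #
    # With full=False (the default), only the branchmaps of the 'served'
    # views are refreshed, and only when the transaction added changesets.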
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing
        subsequent operations to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

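    # Illustrative sketch (hypothetical callback), showing how code running
    # under a lock can defer work until the outermost lock is released:
    #
    #     def _notify(success):
    #         # success is True when the locked section exited cleanly
    #         repo.ui.note(b'locks released\n')
    #
    #     repo._afterlock(_notify)
    #
    # As the fallback branch above shows, the callback runs immediately with
    # True when no lock is currently held.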
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

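    # Illustrative sketch of the acquisition order both docstrings require:
    # when both locks are needed, take 'wlock' before 'lock' (the devel
    # warning in wlock() fires for the reverse order). commit() below does
    # exactly this:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to touch both the working copy and the store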
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs, after the locks are released
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

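    # Illustrative sketch (hypothetical message and user) of driving commit()
    # programmatically; match, editor and extra are all optional:
    #
    #     node = repo.commit(
    #         text=b'example: touch up docs',
    #         user=b'Example Hacker <eh@example.com>',
    #     )
    #     # node is the new changeset, or None when there was nothing to
    #     # commit (unless ui.allowemptycommit is set)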
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

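    # Illustrative sketch (hypothetical usage): list the heads of the current
    # dirstate branch, newest first, including closed heads:
    #
    #     for node in repo.branchheads(closed=True):
    #         repo.ui.write(b'%s\n' % short(node))
    #
    # An unknown branch name yields an empty list rather than raising.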
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

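    # Illustrative note on the sampling loop in between(): walking first
    # parents from 'top' toward 'bottom', it records only the nodes at
    # distances 1, 2, 4, 8, ... from 'top' (append when i == f, then double
    # f). For a linear chain top <- a <- b <- c <- d <- ... the list for one
    # pair is thus a logarithmic-size sample (a, b, d, h, ...), which lets
    # the legacy discovery protocol narrow a range down without transferring
    # every node in it.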
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; its hooks are called with a pushop
        (carrying repo, remote, and outgoing) before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

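    # Illustrative sketch (hypothetical key and node), using the standard
    # bookmarks pushkey namespace, where an empty old value means the key is
    # being created; the prepushkey hook can veto the change first:
    #
    #     ok = repo.pushkey(
    #         b'bookmarks',   # namespace
    #         b'feature-x',   # key
    #         b'',            # old value: key did not exist yet
    #         hex(node),      # new value
    #     )
    #     # ok is False when a prepushkey hook aborted the operation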
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

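# Illustrative note: savecommitmessage() above is what backs the recovery
# hint printed by commit() when a transaction fails -- the message survives
# in .hg/last-message.txt and can be reused with:
#
#     hg commit --logfile .hg/last-message.txt --edit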
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))

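# Illustrative examples of the journal -> undo mapping performed by
# undoname(); only the first occurrence of b'journal' in the basename is
# replaced:
#
#     undoname(b'.hg/store/journal')            -> b'.hg/store/undo'
#     undoname(b'.hg/store/journal.phaseroots') -> b'.hg/store/undo.phaseroots'
#
# The assertion means callers may only pass paths whose basename starts
# with b'journal'.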
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'exp-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements

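# Illustrative sketch (an assumption based on stock configuration defaults,
# not a guarantee): with format.usestore, format.usefncache, format.dotencode,
# generaldelta and format.sparse-revlog at their defaults, a fresh repository
# ends up with roughly:
#
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      requirementsmod.SPARSEREVLOG_REQUIREMENT}
#
# Opting into zstd via format.revlog-compression would add
# b'revlog-compression-zstd' on top of that.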
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because the
    requirements they depend on are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.exp-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped

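# A minimal sketch (not part of the original file) of how the two helpers
# above compose during repository creation, mirroring the calls made in
# createrepository() below; `someui` stands in for a real ui instance,
# e.g. one obtained from mercurial.ui.ui.load():
def _sketch_newrepo_requirements(someui, createopts):
    createopts = defaultcreateopts(someui, createopts=createopts)
    reqs = newreporequirements(someui, createopts=createopts)
    # drop anything whose prerequisites are disabled (warns as a side effect)
    reqs -= checkrequirementscompat(someui, reqs)
    return reqs
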
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

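# A sketch of the wrapping pattern the docstring above describes: an
# extension claiming a hypothetical b'mycreateopt' creation option. This
# code would live in the extension's own module, not here:
#
#     from mercurial import extensions, localrepo
#
#     def _filtercreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         # b'mycreateopt' is ours, so it is no longer "unknown"
#         unknown.pop(b'mycreateopt', None)
#         return unknown
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'filterknowncreateopts', _filtercreateopts
#         )
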
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones.
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # Write the working copy ones.
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository is not a
    # shared one, write the store requirements. For a new shared repository,
    # we don't need to write them as they are already present in the
    # source's store requires.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

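# A usage sketch (not part of the original file): driving createrepository()
# directly. Callers normally go through mercurial.hg instead; the path below
# is hypothetical, and paths are bytes:
#
#     from mercurial import localrepo, ui as uimod
#
#     u = uimod.ui.load()
#     localrepo.createrepository(u, b'/tmp/demo-repo', createopts={b'lfs': True})
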
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
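
# The class-swapping trick above, shown in isolation (a standalone sketch,
# not part of the original file): swapping __class__ reroutes every
# subsequent attribute lookup through the poisoned type.
#
#     class Victim(object):
#         def close(self):
#             pass
#
#     class Poisoned(object):
#         def __getattribute__(self, item):
#             if item == 'close':
#                 return object.__getattribute__(self, item)
#             raise RuntimeError('instance was poisoned')
#
#         def close(self):
#             pass
#
#     v = Victim()
#     object.__setattr__(v, '__class__', Poisoned)
#     v.close()   # still allowed
#     v.root      # raises RuntimeError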
@@ -1,581 +1,569
setup

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > exp-share-safe = True
  > [storage]
  > revlog.persistent-nodemap.slow-path=allow
  > EOF

prepare source repo

  $ hg init source
  $ cd source
  $ cat .hg/requires
  exp-sharesafe
  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ echo a > a
  $ hg ci -Aqm "added a"
  $ echo b > b
  $ hg ci -Aqm "added b"

  $ HGEDITOR=cat hg config --shared
  abort: repository is not shared; can't use --shared
  [10]
  $ cd ..

Create a shared repo and check the requirements are shared and read correctly
  $ hg share source shared1
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd shared1
  $ cat .hg/requires
  exp-sharesafe
  shared

  $ hg debugrequirements -R ../source
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  shared
  sparserevlog
  store

  $ echo c > c
  $ hg ci -Aqm "added c"

Check that config of the source repository is also loaded

  $ hg showconfig ui.curses
  [1]

  $ echo "[ui]" >> ../source/.hg/hgrc
  $ echo "curses=true" >> ../source/.hg/hgrc

  $ hg showconfig ui.curses
  true

Test that extensions of source repository are also loaded

  $ hg debugextensions
  share
  $ hg extdiff -p echo
  hg: unknown command 'extdiff'
  'extdiff' is provided by the following extension:

      extdiff   command to allow external programs to compare revisions

  (use 'hg help extensions' for information on enabling extensions)
  [10]

  $ echo "[extensions]" >> ../source/.hg/hgrc
  $ echo "extdiff=" >> ../source/.hg/hgrc

  $ hg debugextensions -R ../source
  extdiff
  share
  $ hg extdiff -R ../source -p echo

BROKEN: the command below will not work if the shared source's config is not
loaded on dispatch, even though debugextensions says that the extension is
loaded
  $ hg debugextensions
  extdiff
  share

  $ hg extdiff -p echo

However, local .hg/hgrc should override the config set by share source

  $ echo "[ui]" >> .hg/hgrc
  $ echo "curses=false" >> .hg/hgrc

  $ hg showconfig ui.curses
  false

  $ HGEDITOR=cat hg config --shared
  [ui]
  curses=true
  [extensions]
  extdiff=

  $ HGEDITOR=cat hg config --local
  [ui]
  curses=false

Testing that hooks set in source repository also run in shared repo

  $ cd ../source
  $ cat <<EOF >> .hg/hgrc
  > [extensions]
  > hooklib=
  > [hooks]
  > pretxnchangegroup.reject_merge_commits = \
  >     python:hgext.hooklib.reject_merge_commits.hook
  > EOF

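For context, a python hook such as the one configured above is just a callable
reachable at the dotted path after "python:". A rough sketch of the shape such
a hook takes (hypothetical, simplified from hgext.hooklib):

    from mercurial import error

    def hook(ui, repo, hooktype, node=None, **kwargs):
        # pretxnchangegroup hooks see the incoming changesets before the
        # transaction commits; raising Abort rejects the whole changegroup.
        for rev in repo.changelog.revs(start=repo[node].rev()):
            ctx = repo[rev]
            if len(ctx.parents()) > 1:
                raise error.Abort(
                    b'%s rejected as merge on the same branch. '
                    b'Please consider rebase.' % ctx
                )
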
  $ cd ..
  $ hg clone source cloned
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd cloned
  $ hg up 0
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo bar > bar
  $ hg ci -Aqm "added bar"
  $ hg merge
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m "merge commit"

  $ hg push ../source
  pushing to ../source
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

Test that if share source config is untrusted, we don't read it

  $ cd ../shared1

  $ cat << EOF > $TESTTMP/untrusted.py
  > from mercurial import scmutil, util
  > def uisetup(ui):
  >     class untrustedui(ui.__class__):
  >         def _trusted(self, fp, f):
  >             if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
  >                 return False
  >             return super(untrustedui, self)._trusted(fp, f)
  >     ui.__class__ = untrustedui
  > EOF

  $ hg showconfig hooks
  hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook

  $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
  [1]

Update the source repository format and check that shared repo works

  $ cd ../source

Disable zstd-related tests because zstd is not present in the pure version
#if zstd
  $ echo "[format]" >> .hg/hgrc
  $ echo "revlog-compression=zstd" >> .hg/hgrc

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
     added: revlog-compression-zstd

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  $ hg log -r .
  changeset:   1:5f6d8a4bf34a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added b

#endif
  $ echo "[format]" >> .hg/hgrc
  $ echo "use-persistent-nodemap=True" >> .hg/hgrc

  $ hg debugupgraderepo --run -q -R ../shared1
  abort: cannot upgrade repository; unsupported source requirement: shared
  [255]

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
     added: persistent-nodemap

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  $ hg log -r .
  changeset:   1:5f6d8a4bf34a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added b


Shared one should work
  $ cd ../shared1
  $ hg log -r .
  changeset:   2:155349b645be
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added c


Testing that nonsharedrc is loaded for source and not shared

  $ cd ../source
  $ touch .hg/hgrc-not-shared
  $ echo "[ui]" >> .hg/hgrc-not-shared
  $ echo "traceback=true" >> .hg/hgrc-not-shared

  $ hg showconfig ui.traceback
  true

  $ HGEDITOR=cat hg config --non-shared
  [ui]
  traceback=true

  $ cd ../shared1
  $ hg showconfig ui.traceback
  [1]

Unsharing works

  $ hg unshare

Test that source config is added to the shared one after unshare, and that the
current repo's config is still respected over the config which came from source
  $ cd ../cloned
  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]
  $ hg showconfig ui.curses -R ../shared1
  false

  $ cd ../

Test that upgrading using debugupgraderepo works
=================================================

  $ hg init non-share-safe --config format.exp-share-safe=false
  $ cd non-share-safe
  $ hg debugrequirements
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ echo foo > foo
  $ hg ci -Aqm 'added foo'
  $ echo bar > bar
  $ hg ci -Aqm 'added bar'

Create a share before upgrading

  $ cd ..
  $ hg share non-share-safe nss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg debugrequirements -R nss-share
  dotencode
  fncache
  generaldelta
  revlogv1
  shared
  sparserevlog
  store
  $ cd non-share-safe

Upgrade

  $ hg debugupgraderepo -q
  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     added: exp-sharesafe

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     added: exp-sharesafe

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.

  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ cat .hg/requires
  exp-sharesafe

  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


-Make sure existing shares still works
+Make sure existing shares don't work with default config

-  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-warn-outdated-shares=false
-  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
-  |
-  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
-
   $ hg log -GT "{node}: {desc}\n" -R ../nss-share
-  warning: source repository supports share-safe functionality. Reshare to upgrade.
-  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
-  |
-  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
+  abort: version mismatch: source uses share-safe functionality while the current share does not
+  [255]

Create a safe share from the upgraded one

  $ cd ..
  $ hg share non-share-safe ss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd ss-share
  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ cd ../non-share-safe

Test that downgrading works too

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > exp-share-safe = False
  > EOF

  $ hg debugupgraderepo -q
  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     removed: exp-sharesafe

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  $ hg debugupgraderepo -q --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     removed: exp-sharesafe

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.

  $ hg debugrequirements
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ test -f .hg/store/requires
  [1]

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Make sure existing shares still work

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../ss-share
  abort: share source does not support exp-sharesafe requirement
  [255]

Testing automatic downgrade of shares when config is set

  $ touch ../ss-share/.hg/wlock
  $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
  abort: failed to downgrade share, got error: Lock held
  [255]
  $ rm ../ss-share/.hg/wlock

  $ hg log -GT "{node}: {desc}\n" -R ../ss-share --config experimental.sharesafe-auto-downgrade-shares=true
  repository downgraded to not use share-safe mode
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../ss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Testing automatic upgrade of shares when config is set

  $ hg debugupgraderepo -q --run --config format.exp-share-safe=True
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     added: exp-sharesafe

  processed revlogs:
    - all-filelogs
    - changelog
    - manifest

  repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
-  warning: source repository supports share-safe functionality. Reshare to upgrade.
-  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
-  |
-  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
+  abort: version mismatch: source uses share-safe functionality while the current share does not
+  [255]

Check that if lock is taken, upgrade fails but read operations are successful
  $ touch ../nss-share/.hg/wlock
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
  failed to upgrade share, got error: Lock held
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true --config experimental.sharesafe-warn-outdated-shares=false
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true --config experimental.sharesafe-auto-upgrade-fail-error=true
  abort: failed to upgrade share, got error: Lock held
  [255]

  $ rm ../nss-share/.hg/wlock
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share --config experimental.sharesafe-auto-upgrade-shares=true
  repository upgraded to use share-safe mode
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Test that unshare works

  $ hg unshare -R ../nss-share
  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
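
A quick reference for the experimental knobs exercised above (names taken
verbatim from the test invocations; sketched here as a hypothetical
config-reading helper, not an authoritative API):

    def sharesafe_policy(ui):
        # By default a share whose source's share-safe state mismatches now
        # aborts; these options opt into warning or automatic conversion.
        return {
            'warn-outdated': ui.configbool(
                b'experimental', b'sharesafe-warn-outdated-shares'
            ),
            'auto-upgrade': ui.configbool(
                b'experimental', b'sharesafe-auto-upgrade-shares'
            ),
            'auto-downgrade': ui.configbool(
                b'experimental', b'sharesafe-auto-downgrade-shares'
            ),
            'upgrade-fail-error': ui.configbool(
                b'experimental', b'sharesafe-auto-upgrade-fail-error'
            ),
        }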