share: show warning if share is outdated while source supports share-safe...
Pulkit Goyal
r46619:49b4ab1d default
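This changeset makes repository opening warn when an old-style share points at a source repository that has since been upgraded to share-safe. As a minimal standalone sketch of the new check (the helper names below and the 'exp-sharesafe' requirement string are illustrative assumptions; the actual patch uses _readrequires() and requirementsmod.SHARESAFE_REQUIREMENT, as shown in the diff):

import os

# Assumed requirement string at this revision; the diff's abort message
# refers to it as "exp-sharesafe".
SHARESAFE = 'exp-sharesafe'


def read_requires(path):
    """Return the set of requirement strings listed in a 'requires' file."""
    try:
        with open(path, 'rb') as fp:
            return {line.decode('ascii') for line in fp.read().splitlines()}
    except FileNotFoundError:
        return set()


def warn_if_share_outdated(share_hg, source_hg, warn):
    """Warn when a legacy share's source has been upgraded to share-safe.

    share_hg and source_hg are the .hg/ directories of the share and of
    its share source; warn is any callable like ui.warn.
    """
    local = read_requires(os.path.join(share_hg, 'requires'))
    if SHARESAFE in local:
        return  # the share itself is share-safe already; nothing to do
    source = read_requires(os.path.join(source_hg, 'requires'))
    if SHARESAFE in source:
        warn('warning: source repository supports share-safe '
             'functionality. Reshare to upgrade.\n')

For example, warn_if_share_outdated('share/.hg', 'src/.hg', sys.stderr.write) would print the warning once the source's .hg/requires gains the share-safe entry, which mirrors the elif branch added in the diff below.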
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,3576 +1,3590 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import functools
 import os
 import random
 import sys
 import time
 import weakref

 from .i18n import _
 from .node import (
     bin,
     hex,
     nullid,
     nullrev,
     short,
 )
 from .pycompat import (
     delattr,
     getattr,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     bundlecaches,
     changegroup,
     color,
     commit,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     match as matchmod,
     mergestate as mergestatemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     rcutil,
     repoview,
     requirements as requirementsmod,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )

 from .interfaces import (
     repository,
     util as interfaceutil,
 )

 from .utils import (
     hashutil,
     procutil,
     stringutil,
 )

 from .revlogutils import constants as revlogconst

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()


 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered"""

     def __get__(self, repo, type=None):
         if repo is None:
             return self
         # proxy to unfiltered __dict__ since filtered repo has no entry
         unfi = repo.unfiltered()
         try:
             return unfi.__dict__[self.sname]
         except KeyError:
             pass
         return super(_basefilecache, self).__get__(unfi, type)

     def set(self, repo, value):
         return super(_basefilecache, self).set(repo.unfiltered(), value)


 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""

     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)


 class storecache(_basefilecache):
     """filecache for files in the store"""

     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, b''))

     def join(self, obj, fname):
         return obj.sjoin(fname)


 class mixedrepostorecache(_basefilecache):
     """filecache for a mix of files in .hg/store and outside"""

     def __init__(self, *pathsandlocations):
         # scmutil.filecache only uses the path for passing back into our
         # join(), so we can safely pass a list of paths and locations
         super(mixedrepostorecache, self).__init__(*pathsandlocations)
         _cachedfiles.update(pathsandlocations)

     def join(self, obj, fnameandlocation):
         fname, location = fnameandlocation
         if location == b'plain':
             return obj.vfs.join(fname)
         else:
             if location != b'':
                 raise error.ProgrammingError(
                     b'unexpected location: %s' % location
                 )
             return obj.sjoin(fname)


 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True


 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)


 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())


 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""

     @functools.wraps(orig)
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)

     return wrapper


 moderncaps = {
     b'lookup',
     b'branchmap',
     b'pushkey',
     b'known',
     b'getbundle',
     b'unbundle',
 }
 legacycaps = moderncaps.union({b'changegroupsubset'})


 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after sendcommands()'
             )

         if self._closed:
             raise error.ProgrammingError(
                 b'callcommand() cannot be used after close()'
             )

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = pycompat.futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True


 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()

         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered(b'served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)

     # Begin of _basepeer interface.

     def url(self):
         return self._repo.url()

     def local(self):
         return self._repo

     def peer(self):
         return self

     def canpush(self):
         return True

     def close(self):
         self._repo.close()

     # End of _basepeer interface.

     # Begin of _basewirecommands interface.

     def branchmap(self):
         return self._repo.branchmap()

     def capabilities(self):
         return self._caps

     def clonebundles(self):
         return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return b"%s %s %s %s %s" % (
             one,
             two,
             pycompat.bytestr(three),
             pycompat.bytestr(four),
             pycompat.bytestr(five),
         )

     def getbundle(
         self, source, heads=None, common=None, bundlecaps=None, **kwargs
     ):
         chunks = exchange.getbundlechunks(
             self._repo,
             source,
             heads=heads,
             common=common,
             bundlecaps=bundlecaps,
             **kwargs
         )[1]
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler(b'01', cb, None)

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def lookup(self, key):
         return self._repo.lookup(key)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def stream_out(self):
         raise error.Abort(_(b'cannot perform stream clone against local peer'))

     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                 if util.safehasattr(ret, b'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(
                 _(b'push failed:'), stringutil.forcebytestr(exc)
             )

     # End of _basewirecommands interface.

     # Begin of peer interface.

     def commandexecutor(self):
         return localcommandexecutor(self)

     # End of peer interface.


 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     """peer extension which implements legacy methods too; used for tests with
     restricted capabilities"""

     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)

     # Begin of baselegacywirecommands interface.

     def between(self, pairs):
         return self._repo.between(pairs)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(
             self._repo, missingroots=bases, ancestorsof=heads
         )
         return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

     # End of baselegacywirecommands interface.


 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()


 def _getsharedvfs(hgvfs, requirements):
     """returns the vfs object pointing to root of shared source
     repo for a shared repository

     hgvfs is vfs pointing at .hg/ of current repo (shared one)
     requirements is a set of requirements of current repo (shared one)
     """
     # The ``shared`` or ``relshared`` requirements indicate the
     # store lives in the path contained in the ``.hg/sharedpath`` file.
     # This is an absolute path for ``shared`` and relative to
     # ``.hg/`` for ``relshared``.
     sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
     if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
         sharedpath = hgvfs.join(sharedpath)

     sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

     if not sharedvfs.exists():
         raise error.RepoError(
             _(b'.hg/sharedpath points to nonexistent directory %s')
             % sharedvfs.base
         )
     return sharedvfs


 def _readrequires(vfs, allowmissing):
     """reads the requires file present at root of this vfs
     and returns a set of requirements

     If allowmissing is True, we suppress ENOENT if raised"""
     # requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     try:
         requirements = set(vfs.read(b'requires').splitlines())
     except IOError as e:
         if not (allowmissing and e.errno == errno.ENOENT):
             raise
         requirements = set()
     return requirements


 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.

     Given arguments needed to construct a local repository, this function
     performs various early repository loading functionality (such as
     reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
     the repository can be opened, derives a type suitable for representing
     that repository, and returns an instance of it.

     The returned object conforms to the ``repository.completelocalrepository``
     interface.

     The repository type is derived by calling a series of factory functions
     for each aspect/interface of the final repository. These are defined by
     ``REPO_INTERFACES``.

     Each factory function is called to produce a type implementing a specific
     interface. The cumulative list of returned types will be combined into a
     new type and that type will be instantiated to represent the local
     repository.

     The factory functions each receive various state that may be consulted
     as part of deriving a type.

     Extensions should wrap these factory functions to customize repository type
     creation. Note that an extension's wrapped function may be called even if
     that extension is not loaded for the repo being constructed. Extensions
     should check if their ``__name__`` appears in the
     ``extensionmodulenames`` set passed to the factory function and no-op if
     not.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy

     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
     # Whether this repository is a shared one or not
     shared = False
     # If this repository is shared, vfs pointing to shared repo
     sharedvfs = None

     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise
         except ValueError as e:
             # Can be raised on Python 3.8 when path is invalid.
             raise error.Abort(
                 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
             )

         raise error.RepoError(_(b'repository %s not found') % path)

     requirements = _readrequires(hgvfs, True)
     shared = (
         requirementsmod.SHARED_REQUIREMENT in requirements
         or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
     )
     if shared:
         sharedvfs = _getsharedvfs(hgvfs, requirements)

     # if .hg/requires contains the sharesafe requirement, it means
     # there exists a `.hg/store/requires` too and we should read it
     # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
     # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
     # is not present; refer to checkrequirementscompat() for that
+    #
+    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
+    # repository was shared the old way. We check the share source .hg/requires
+    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
+    # to be reshared
     if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

         if (
             shared
             and requirementsmod.SHARESAFE_REQUIREMENT
             not in _readrequires(sharedvfs, True)
         ):
             raise error.Abort(
                 _(b"share source does not support exp-sharesafe requirement")
             )

         if shared:
             # This is a shared repo
             storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
         else:
             storevfs = vfsmod.vfs(hgvfs.join(b'store'))

         requirements |= _readrequires(storevfs, False)
+    elif shared:
+        sourcerequires = _readrequires(sharedvfs, False)
+        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
+            ui.warn(
+                _(
+                    b'warning: source repository supports share-safe functionality.'
+                    b' Reshare to upgrade.\n'
+                )
+            )
578
592
579 # The .hg/hgrc file may load extensions or contain config options
593 # The .hg/hgrc file may load extensions or contain config options
580 # that influence repository construction. Attempt to load it and
594 # that influence repository construction. Attempt to load it and
581 # process any new extensions that it may have pulled in.
595 # process any new extensions that it may have pulled in.
582 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
596 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
583 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
597 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
584 extensions.loadall(ui)
598 extensions.loadall(ui)
585 extensions.populateui(ui)
599 extensions.populateui(ui)
586
600
587 # Set of module names of extensions loaded for this repository.
601 # Set of module names of extensions loaded for this repository.
588 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
602 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
589
603
590 supportedrequirements = gathersupportedrequirements(ui)
604 supportedrequirements = gathersupportedrequirements(ui)
591
605
592 # We first validate the requirements are known.
606 # We first validate the requirements are known.
593 ensurerequirementsrecognized(requirements, supportedrequirements)
607 ensurerequirementsrecognized(requirements, supportedrequirements)
594
608
595 # Then we validate that the known set is reasonable to use together.
609 # Then we validate that the known set is reasonable to use together.
596 ensurerequirementscompatible(ui, requirements)
610 ensurerequirementscompatible(ui, requirements)
597
611
598 # TODO there are unhandled edge cases related to opening repositories with
612 # TODO there are unhandled edge cases related to opening repositories with
599 # shared storage. If storage is shared, we should also test for requirements
613 # shared storage. If storage is shared, we should also test for requirements
600 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
614 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
601 # that repo, as that repo may load extensions needed to open it. This is a
615 # that repo, as that repo may load extensions needed to open it. This is a
602 # bit complicated because we don't want the other hgrc to overwrite settings
616 # bit complicated because we don't want the other hgrc to overwrite settings
603 # in this hgrc.
617 # in this hgrc.
604 #
618 #
605 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
619 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
606 # file when sharing repos. But if a requirement is added after the share is
620 # file when sharing repos. But if a requirement is added after the share is
607 # performed, thereby introducing a new requirement for the opener, we may
621 # performed, thereby introducing a new requirement for the opener, we may
608 # will not see that and could encounter a run-time error interacting with
622 # will not see that and could encounter a run-time error interacting with
609 # that shared store since it has an unknown-to-us requirement.
623 # that shared store since it has an unknown-to-us requirement.
610
624
611 # At this point, we know we should be capable of opening the repository.
625 # At this point, we know we should be capable of opening the repository.
612 # Now get on with doing that.
626 # Now get on with doing that.
613
627
614 features = set()
628 features = set()
615
629
616 # The "store" part of the repository holds versioned data. How it is
630 # The "store" part of the repository holds versioned data. How it is
617 # accessed is determined by various requirements. If `shared` or
631 # accessed is determined by various requirements. If `shared` or
618 # `relshared` requirements are present, this indicates current repository
632 # `relshared` requirements are present, this indicates current repository
619 # is a share and store exists in path mentioned in `.hg/sharedpath`
633 # is a share and store exists in path mentioned in `.hg/sharedpath`
620 if shared:
634 if shared:
621 storebasepath = sharedvfs.base
635 storebasepath = sharedvfs.base
622 cachepath = sharedvfs.join(b'cache')
636 cachepath = sharedvfs.join(b'cache')
623 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
637 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
624 else:
638 else:
625 storebasepath = hgvfs.base
639 storebasepath = hgvfs.base
626 cachepath = hgvfs.join(b'cache')
640 cachepath = hgvfs.join(b'cache')
627 wcachepath = hgvfs.join(b'wcache')
641 wcachepath = hgvfs.join(b'wcache')
628
642
629 # The store has changed over time and the exact layout is dictated by
643 # The store has changed over time and the exact layout is dictated by
630 # requirements. The store interface abstracts differences across all
644 # requirements. The store interface abstracts differences across all
631 # of them.
645 # of them.
632 store = makestore(
646 store = makestore(
633 requirements,
647 requirements,
634 storebasepath,
648 storebasepath,
635 lambda base: vfsmod.vfs(base, cacheaudited=True),
649 lambda base: vfsmod.vfs(base, cacheaudited=True),
636 )
650 )
637 hgvfs.createmode = store.createmode
651 hgvfs.createmode = store.createmode
638
652
639 storevfs = store.vfs
653 storevfs = store.vfs
640 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
654 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
641
655
642 # The cache vfs is used to manage cache files.
656 # The cache vfs is used to manage cache files.
643 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
657 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
644 cachevfs.createmode = store.createmode
658 cachevfs.createmode = store.createmode
645 # The cache vfs is used to manage cache files related to the working copy
659 # The cache vfs is used to manage cache files related to the working copy
646 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
660 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
647 wcachevfs.createmode = store.createmode
661 wcachevfs.createmode = store.createmode
648
662
649 # Now resolve the type for the repository object. We do this by repeatedly
663 # Now resolve the type for the repository object. We do this by repeatedly
650 # calling a factory function to produces types for specific aspects of the
664 # calling a factory function to produces types for specific aspects of the
651 # repo's operation. The aggregate returned types are used as base classes
665 # repo's operation. The aggregate returned types are used as base classes
652 # for a dynamically-derived type, which will represent our new repository.
666 # for a dynamically-derived type, which will represent our new repository.
653
667
654 bases = []
668 bases = []
655 extrastate = {}
669 extrastate = {}
656
670
657 for iface, fn in REPO_INTERFACES:
671 for iface, fn in REPO_INTERFACES:
658 # We pass all potentially useful state to give extensions tons of
672 # We pass all potentially useful state to give extensions tons of
659 # flexibility.
673 # flexibility.
660 typ = fn()(
674 typ = fn()(
661 ui=ui,
675 ui=ui,
662 intents=intents,
676 intents=intents,
663 requirements=requirements,
677 requirements=requirements,
664 features=features,
678 features=features,
665 wdirvfs=wdirvfs,
679 wdirvfs=wdirvfs,
666 hgvfs=hgvfs,
680 hgvfs=hgvfs,
667 store=store,
681 store=store,
668 storevfs=storevfs,
682 storevfs=storevfs,
669 storeoptions=storevfs.options,
683 storeoptions=storevfs.options,
670 cachevfs=cachevfs,
684 cachevfs=cachevfs,
671 wcachevfs=wcachevfs,
685 wcachevfs=wcachevfs,
672 extensionmodulenames=extensionmodulenames,
686 extensionmodulenames=extensionmodulenames,
673 extrastate=extrastate,
687 extrastate=extrastate,
674 baseclasses=bases,
688 baseclasses=bases,
675 )
689 )
676
690
677 if not isinstance(typ, type):
691 if not isinstance(typ, type):
678 raise error.ProgrammingError(
692 raise error.ProgrammingError(
679 b'unable to construct type for %s' % iface
693 b'unable to construct type for %s' % iface
680 )
694 )
681
695
682 bases.append(typ)
696 bases.append(typ)
683
697
684 # type() allows you to use characters in type names that wouldn't be
698 # type() allows you to use characters in type names that wouldn't be
685 # recognized as Python symbols in source code. We abuse that to add
699 # recognized as Python symbols in source code. We abuse that to add
686 # rich information about our constructed repo.
700 # rich information about our constructed repo.
687 name = pycompat.sysstr(
701 name = pycompat.sysstr(
688 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
702 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
689 )
703 )
690
704
691 cls = type(name, tuple(bases), {})
705 cls = type(name, tuple(bases), {})
692
706
693 return cls(
707 return cls(
694 baseui=baseui,
708 baseui=baseui,
695 ui=ui,
709 ui=ui,
696 origroot=path,
710 origroot=path,
697 wdirvfs=wdirvfs,
711 wdirvfs=wdirvfs,
698 hgvfs=hgvfs,
712 hgvfs=hgvfs,
699 requirements=requirements,
713 requirements=requirements,
700 supportedrequirements=supportedrequirements,
714 supportedrequirements=supportedrequirements,
701 sharedpath=storebasepath,
715 sharedpath=storebasepath,
702 store=store,
716 store=store,
703 cachevfs=cachevfs,
717 cachevfs=cachevfs,
704 wcachevfs=wcachevfs,
718 wcachevfs=wcachevfs,
705 features=features,
719 features=features,
706 intents=intents,
720 intents=intents,
707 )
721 )
708
722
709
723
710 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
724 def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
711 """Load hgrc files/content into a ui instance.
725 """Load hgrc files/content into a ui instance.
712
726
713 This is called during repository opening to load any additional
727 This is called during repository opening to load any additional
714 config files or settings relevant to the current repository.
728 config files or settings relevant to the current repository.
715
729
716 Returns a bool indicating whether any additional configs were loaded.
730 Returns a bool indicating whether any additional configs were loaded.
717
731
718 Extensions should monkeypatch this function to modify how per-repo
732 Extensions should monkeypatch this function to modify how per-repo
719 configs are loaded. For example, an extension may wish to pull in
733 configs are loaded. For example, an extension may wish to pull in
720 configs from alternate files or sources.
734 configs from alternate files or sources.
721
735
722 sharedvfs is vfs object pointing to source repo if the current one is a
736 sharedvfs is vfs object pointing to source repo if the current one is a
723 shared one
737 shared one
724 """
738 """
725 if not rcutil.use_repo_hgrc():
739 if not rcutil.use_repo_hgrc():
726 return False
740 return False
727
741
728 ret = False
742 ret = False
729 # first load config from shared source if we has to
743 # first load config from shared source if we has to
730 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
744 if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
731 try:
745 try:
732 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
746 ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
733 ret = True
747 ret = True
734 except IOError:
748 except IOError:
735 pass
749 pass
736
750
737 try:
751 try:
738 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
752 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
739 ret = True
753 ret = True
740 except IOError:
754 except IOError:
741 pass
755 pass
742
756
743 try:
757 try:
744 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
758 ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
745 ret = True
759 ret = True
746 except IOError:
760 except IOError:
747 pass
761 pass
748
762
749 return ret
763 return ret
750
764
751
765
752 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
766 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
753 """Perform additional actions after .hg/hgrc is loaded.
767 """Perform additional actions after .hg/hgrc is loaded.
754
768
755 This function is called during repository loading immediately after
769 This function is called during repository loading immediately after
756 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
770 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
757
771
758 The function can be used to validate configs, automatically add
772 The function can be used to validate configs, automatically add
759 options (including extensions) based on requirements, etc.
773 options (including extensions) based on requirements, etc.
760 """
774 """
761
775
762 # Map of requirements to list of extensions to load automatically when
776 # Map of requirements to list of extensions to load automatically when
763 # requirement is present.
777 # requirement is present.
764 autoextensions = {
778 autoextensions = {
765 b'git': [b'git'],
779 b'git': [b'git'],
766 b'largefiles': [b'largefiles'],
780 b'largefiles': [b'largefiles'],
767 b'lfs': [b'lfs'],
781 b'lfs': [b'lfs'],
768 }
782 }
769
783
770 for requirement, names in sorted(autoextensions.items()):
784 for requirement, names in sorted(autoextensions.items()):
771 if requirement not in requirements:
785 if requirement not in requirements:
772 continue
786 continue
773
787
774 for name in names:
788 for name in names:
775 if not ui.hasconfig(b'extensions', name):
789 if not ui.hasconfig(b'extensions', name):
776 ui.setconfig(b'extensions', name, b'', source=b'autoload')
790 ui.setconfig(b'extensions', name, b'', source=b'autoload')
777
791
778
792
779 def gathersupportedrequirements(ui):
793 def gathersupportedrequirements(ui):
780 """Determine the complete set of recognized requirements."""
794 """Determine the complete set of recognized requirements."""
781 # Start with all requirements supported by this file.
795 # Start with all requirements supported by this file.
782 supported = set(localrepository._basesupported)
796 supported = set(localrepository._basesupported)
783
797
784 # Execute ``featuresetupfuncs`` entries if they belong to an extension
798 # Execute ``featuresetupfuncs`` entries if they belong to an extension
785 # relevant to this ui instance.
799 # relevant to this ui instance.
786 modules = {m.__name__ for n, m in extensions.extensions(ui)}
800 modules = {m.__name__ for n, m in extensions.extensions(ui)}
787
801
788 for fn in featuresetupfuncs:
802 for fn in featuresetupfuncs:
789 if fn.__module__ in modules:
803 if fn.__module__ in modules:
790 fn(ui, supported)
804 fn(ui, supported)
791
805
792 # Add derived requirements from registered compression engines.
806 # Add derived requirements from registered compression engines.
793 for name in util.compengines:
807 for name in util.compengines:
794 engine = util.compengines[name]
808 engine = util.compengines[name]
795 if engine.available() and engine.revlogheader():
809 if engine.available() and engine.revlogheader():
796 supported.add(b'exp-compression-%s' % name)
810 supported.add(b'exp-compression-%s' % name)
797 if engine.name() == b'zstd':
811 if engine.name() == b'zstd':
798 supported.add(b'revlog-compression-zstd')
812 supported.add(b'revlog-compression-zstd')
799
813
800 return supported
814 return supported
801
815
802
816
803 def ensurerequirementsrecognized(requirements, supported):
817 def ensurerequirementsrecognized(requirements, supported):
804 """Validate that a set of local requirements is recognized.
818 """Validate that a set of local requirements is recognized.
805
819
806 Receives a set of requirements. Raises an ``error.RepoError`` if there
820 Receives a set of requirements. Raises an ``error.RepoError`` if there
807 exists any requirement in that set that currently loaded code doesn't
821 exists any requirement in that set that currently loaded code doesn't
808 recognize.
822 recognize.
809
823
810 Returns a set of supported requirements.
824 Returns a set of supported requirements.
811 """
825 """
812 missing = set()
826 missing = set()
813
827
814 for requirement in requirements:
828 for requirement in requirements:
815 if requirement in supported:
829 if requirement in supported:
816 continue
830 continue
817
831
818 if not requirement or not requirement[0:1].isalnum():
832 if not requirement or not requirement[0:1].isalnum():
819 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
833 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
820
834
821 missing.add(requirement)
835 missing.add(requirement)
822
836
823 if missing:
837 if missing:
824 raise error.RequirementError(
838 raise error.RequirementError(
825 _(b'repository requires features unknown to this Mercurial: %s')
839 _(b'repository requires features unknown to this Mercurial: %s')
826 % b' '.join(sorted(missing)),
840 % b' '.join(sorted(missing)),
827 hint=_(
841 hint=_(
828 b'see https://mercurial-scm.org/wiki/MissingRequirement '
842 b'see https://mercurial-scm.org/wiki/MissingRequirement '
829 b'for more information'
843 b'for more information'
830 ),
844 ),
831 )
845 )
832
846
833
847
834 def ensurerequirementscompatible(ui, requirements):
848 def ensurerequirementscompatible(ui, requirements):
835 """Validates that a set of recognized requirements is mutually compatible.
849 """Validates that a set of recognized requirements is mutually compatible.
836
850
837 Some requirements may not be compatible with others or require
851 Some requirements may not be compatible with others or require
838 config options that aren't enabled. This function is called during
852 config options that aren't enabled. This function is called during
839 repository opening to ensure that the set of requirements needed
853 repository opening to ensure that the set of requirements needed
840 to open a repository is sane and compatible with config options.
854 to open a repository is sane and compatible with config options.
841
855
842 Extensions can monkeypatch this function to perform additional
856 Extensions can monkeypatch this function to perform additional
843 checking.
857 checking.
844
858
845 ``error.RepoError`` should be raised on failure.
859 ``error.RepoError`` should be raised on failure.
846 """
860 """
847 if (
861 if (
848 requirementsmod.SPARSE_REQUIREMENT in requirements
862 requirementsmod.SPARSE_REQUIREMENT in requirements
849 and not sparse.enabled
863 and not sparse.enabled
850 ):
864 ):
851 raise error.RepoError(
865 raise error.RepoError(
852 _(
866 _(
853 b'repository is using sparse feature but '
867 b'repository is using sparse feature but '
854 b'sparse is not enabled; enable the '
868 b'sparse is not enabled; enable the '
855 b'"sparse" extensions to access'
869 b'"sparse" extensions to access'
856 )
870 )
857 )
871 )
858
872
859
873
860 def makestore(requirements, path, vfstype):
874 def makestore(requirements, path, vfstype):
861 """Construct a storage object for a repository."""
875 """Construct a storage object for a repository."""
862 if b'store' in requirements:
876 if b'store' in requirements:
863 if b'fncache' in requirements:
877 if b'fncache' in requirements:
864 return storemod.fncachestore(
878 return storemod.fncachestore(
865 path, vfstype, b'dotencode' in requirements
879 path, vfstype, b'dotencode' in requirements
866 )
880 )
867
881
868 return storemod.encodedstore(path, vfstype)
882 return storemod.encodedstore(path, vfstype)
869
883
870 return storemod.basicstore(path, vfstype)
884 return storemod.basicstore(path, vfstype)
871
885
872
886
873 def resolvestorevfsoptions(ui, requirements, features):
887 def resolvestorevfsoptions(ui, requirements, features):
874 """Resolve the options to pass to the store vfs opener.
888 """Resolve the options to pass to the store vfs opener.
875
889
876 The returned dict is used to influence behavior of the storage layer.
890 The returned dict is used to influence behavior of the storage layer.
877 """
891 """
878 options = {}
892 options = {}
879
893
880 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
894 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
881 options[b'treemanifest'] = True
895 options[b'treemanifest'] = True
882
896
883 # experimental config: format.manifestcachesize
897 # experimental config: format.manifestcachesize
884 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
898 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
885 if manifestcachesize is not None:
899 if manifestcachesize is not None:
886 options[b'manifestcachesize'] = manifestcachesize
900 options[b'manifestcachesize'] = manifestcachesize
887
901
888 # In the absence of another requirement superseding a revlog-related
902 # In the absence of another requirement superseding a revlog-related
889 # requirement, we have to assume the repo is using revlog version 0.
903 # requirement, we have to assume the repo is using revlog version 0.
890 # This revlog format is super old and we don't bother trying to parse
904 # This revlog format is super old and we don't bother trying to parse
891 # opener options for it because those options wouldn't do anything
905 # opener options for it because those options wouldn't do anything
892 # meaningful on such old repos.
906 # meaningful on such old repos.
893 if (
907 if (
894 b'revlogv1' in requirements
908 b'revlogv1' in requirements
895 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
909 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
896 ):
910 ):
897 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
911 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
898 else: # explicitly mark repo as using revlogv0
912 else: # explicitly mark repo as using revlogv0
899 options[b'revlogv0'] = True
913 options[b'revlogv0'] = True
900
914
901 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
915 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
902 options[b'copies-storage'] = b'changeset-sidedata'
916 options[b'copies-storage'] = b'changeset-sidedata'
903 else:
917 else:
904 writecopiesto = ui.config(b'experimental', b'copies.write-to')
918 writecopiesto = ui.config(b'experimental', b'copies.write-to')
905 copiesextramode = (b'changeset-only', b'compatibility')
919 copiesextramode = (b'changeset-only', b'compatibility')
906 if writecopiesto in copiesextramode:
920 if writecopiesto in copiesextramode:
907 options[b'copies-storage'] = b'extra'
921 options[b'copies-storage'] = b'extra'
908
922
909 return options
923 return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
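

# Illustrative sketch (not upstream code): how a compression requirement
# string decomposes into the engine name stored at ``options[b'compengine']``.
def _example_compression_engine(requirement):
    # b'revlog-compression-zstd'.split(b'-', 2) yields
    # [b'revlog', b'compression', b'zstd'], so index 2 is the engine name.
    return requirement.split(b'-', 2)[2]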


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
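

# Rough sketch (a simplified assumption about what ``makelocalrepository()``
# does with the list above, not the actual implementation): each factory
# yields a class and the final repository type mixes them all together.
def _example_derive_repo_type(**kwargs):
    # Callers would pass at least ``requirements`` and ``features``, since
    # makefilestorage() consumes those keyword arguments.
    bases = [fn()(**kwargs) for _iface, fn in REPO_INTERFACES]
    return type('derivedrepo', tuple(bases), {})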


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs
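
    # Both wards above follow the same pattern (illustrative sketch, not
    # upstream code): hold only a weakref to the repo so the wrapper does
    # not keep the repository object alive, e.g.
    #
    #   rref = weakref.ref(repo)
    #   def ward(path, mode=None):
    #       repo = rref()   # None once the repo has been garbage-collected
    #       if repo is None or mode in (None, b'r', b'rb'):
    #           return      # nothing to check for reads or dead repos
    #       ...             # develwarn() if the relevant lock is not held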

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
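
    # Illustrative usage (a sketch, assuming an existing ``repo``): filtering
    # never nests, so re-filtering an already-filtered repo simply switches
    # views of the same underlying repository:
    #
    #   served = repo.filtered(b'served')       # hides secret/hidden csets
    #   visible = served.filtered(b'visible')   # "visible" view, not nested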

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # cannot be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
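
    # Sketch of the intersection behaviour documented above (hypothetical
    # matcher ``m``; not upstream code). On a non-narrow repo the narrow
    # matcher is an always-matcher, so the intersection collapses to ``m``:
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   narrowed = repo.narrowmatch(m)  # m restricted to the narrowspec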

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null
1662
1676
1663 def __getitem__(self, changeid):
1677 def __getitem__(self, changeid):
1664 # dealing with special cases
1678 # dealing with special cases
1665 if changeid is None:
1679 if changeid is None:
1666 return context.workingctx(self)
1680 return context.workingctx(self)
1667 if isinstance(changeid, context.basectx):
1681 if isinstance(changeid, context.basectx):
1668 return changeid
1682 return changeid
1669
1683
1670 # dealing with multiple revisions
1684 # dealing with multiple revisions
1671 if isinstance(changeid, slice):
1685 if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
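
    # Illustrative sketch of the lookup forms the method above accepts;
    # ``repo`` and the concrete values are assumptions for the example:
    #
    #     repo[0:5]        # slice of changectx, skipping filtered revs
    #     repo[5]          # integer revision number
    #     repo[b'.']       # working directory parent (quick access)
    #     repo[b'tip']     # repository tip
    #     repo[node20]     # 20-byte binary node
    #     repo[b'a' * 40]  # 40-digit hex node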

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
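
    # Illustrative sketch of the %-formatting handled by
    # ``revsetlang.formatspec``: %d embeds an integer revision, %s a
    # bytestring, %ld a list of integers. ``repo`` and the values are
    # assumptions for the example:
    #
    #     for r in repo.revs(b'draft() and ancestors(%d)', 42):
    #         ...
    #     heads = repo.revs(b'heads(branch(%s))', b'default')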

    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
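
    # Illustrative sketch: resolving user-supplied revsets with a local alias
    # overriding any user-defined alias of the same name. The spec and alias
    # definition are assumptions for the example:
    #
    #     revs = repo.anyrevs(
    #         [b'releases()'],
    #         user=True,
    #         localalias={b'releases': b'tag()'},
    #     )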

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
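
    # Illustrative sketch: firing a hook from extension code. The hook name
    # here is hypothetical; keyword arguments are exposed to shell hooks as
    # HG_* environment variables, and ``throw=True`` would raise on failure:
    #
    #     repo.hook(b'myext-sync-done', throw=False, source=b'myext')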

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)
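
    # Illustrative sketch of the extension point discussed in the XXX note
    # above: an extension could wrap _findtags to add its own "virtual"
    # tags. The names below are hypothetical:
    #
    #     def wrappedfindtags(orig, self):
    #         tags, tagtypes = orig(self)
    #         tags[b'last-release'] = self.changelog.tip()
    #         tagtypes[b'last-release'] = b'local'
    #         return tags, tagtypes
    #
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_findtags', wrappedfindtags
    #     )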

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
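
    # Illustrative sketch: ``known`` backs wire-protocol discovery, mapping
    # candidate nodes to booleans (known locally and not filtered). The
    # nodes here are placeholders:
    #
    #     repo.known([node_a, node_b])  # -> e.g. [True, False]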

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
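
    # Illustrative sketch of the config this parses: [encode] and [decode]
    # sections map file patterns to filter commands. A command of "!"
    # disables the pattern; a command prefixed by a registered data-filter
    # name uses that filter; anything else runs as a shell pipe. A
    # hypothetical hgrc:
    #
    #     [encode]
    #     **.txt = tr -d '\r'
    #     [decode]
    #     **.txt = !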

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
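
    # Illustrative sketch: ``flags`` selects the file type written, b'l'
    # for a symlink (``data`` is the link target) and b'x' for the
    # executable bit; the filenames here are hypothetical:
    #
    #     repo.wwrite(b'build.sh', b'#!/bin/sh\n', b'x')  # executable file
    #     repo.wwrite(b'current', b'build.sh', b'l')      # symlink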

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #   ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
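        #
        # For illustration only (hypothetical, abbreviated nodes; real
        # entries carry full 40-digit hex nodes), moving tag v1.0 and adding
        # v1.1 would yield a tags.changes file like::
        #
        #   -M 1ab2c3d4e5... v1.0
        #   +M 4de5f6a7b8... v1.0
        #   +A 7ab8c9d0e1... v1.1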
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
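
    # Illustrative sketch of typical use: transactions are opened under the
    # store lock and committed/aborted via the context-manager protocol;
    # calling transaction() again while one runs returns a nested scope.
    # The description bytes are arbitrary:
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # store writes registered with ``tr``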

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2544
2558
2545 def _buildcacheupdater(self, newtransaction):
2559 def _buildcacheupdater(self, newtransaction):
2546 """called during transaction to build the callback updating cache
2560 """called during transaction to build the callback updating cache
2547
2561
2548 Lives on the repository to help extension who might want to augment
2562 Lives on the repository to help extension who might want to augment
2549 this logic. For this purpose, the created transaction is passed to the
2563 this logic. For this purpose, the created transaction is passed to the
2550 method.
2564 method.
2551 """
2565 """
2552 # we must avoid cyclic reference between repo and transaction.
2566 # we must avoid cyclic reference between repo and transaction.
2553 reporef = weakref.ref(self)
2567 reporef = weakref.ref(self)
2554
2568
2555 def updater(tr):
2569 def updater(tr):
2556 repo = reporef()
2570 repo = reporef()
2557 repo.updatecaches(tr)
2571 repo.updatecaches(tr)
2558
2572
2559 return updater
2573 return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing the fnode cache warms it
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
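
    # Illustrative sketch (not part of upstream code): warming every cache
    # this method knows about, assuming `repo` is a localrepository obtained
    # through e.g. hg.repository(ui, path). This mirrors what the
    # `hg debugupdatecaches` command does.
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
    #
    # With full=True even the lazily-loaded caches (rev-branch cache, tag
    # fnode cache, per-filter branchmaps) are computed and written to disk.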

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
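
    # Illustrative sketch (not part of upstream code): deferring work until
    # the repository is fully unlocked, assuming `repo` holds a lock when the
    # callback is registered. The `success` flag tells the callback whether
    # the lock was released cleanly.
    #
    #     def notify(success):
    #         if success:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)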

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l
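
    # Illustrative sketch (not part of upstream code): the correct
    # acquisition order when both locks are needed is 'wlock' strictly
    # before 'lock':
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate both the working copy and the store
    #
    # Acquiring them the other way round trips the devel 'check-locks'
    # warning above and risks deadlock against other processes.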

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (eg: histedit)
                # whose temporary commit got stripped before the hook is run
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                    )

            self._afterlock(commithook)
            return ret
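
    # Illustrative sketch (not part of upstream code): a minimal programmatic
    # commit, assuming `repo` is a localrepository with pending working-copy
    # changes. commit() returns the new node, or None if nothing changed.
    #
    #     node = repo.commit(
    #         text=b'example commit message',
    #         user=b'Example <example@example.com>',
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing to commit\n')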

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )
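
    # Illustrative sketch (not part of upstream code): comparing the working
    # copy against its first parent, which is what the defaults amount to,
    # assuming `repo` is a localrepository instance.
    #
    #     st = repo.status(ignored=True, unknown=True)
    #     for f in st.modified:
    #         repo.ui.write(b'M %s\n' % f)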

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
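
    # Illustrative sketch (not part of upstream code): a post-dirstate-status
    # fixup callback as an extension might register one (fsmonitor uses this
    # hook point). `record_clean_files` is a hypothetical helper.
    #
    #     def poststatus(wctx, status):
    #         # runs under wlock; read the dirstate via wctx.repo().dirstate
    #         record_clean_files(wctx.repo(), status.clean)
    #
    #     repo.addpostdsstatus(poststatus)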

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
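
    # Note on the loop above: for each (top, bottom) pair it walks first
    # parents from top towards bottom and keeps only the nodes whose distance
    # from top is a power of two (i == f with f doubling: 1, 2, 4, 8, ...).
    # For example, with 100 ancestors between the two nodes it records those
    # 1, 2, 4, 8, 16, 32 and 64 steps below top, giving the old wire protocol
    # a logarithmic-size sample of the chain between them.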

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
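
    # Illustrative sketch (not part of upstream code): setting a bookmark
    # through the generic pushkey protocol, assuming `repo` and a binary
    # `node`. Namespace and key are bytes; old/new are hex nodes, with an
    # empty old meaning "create".
    #
    #     ok = repo.pushkey(b'bookmarks', b'feature', b'', hex(node))
    #     if not ok:
    #         repo.ui.warn(b'bookmark was not updated\n')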

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'exp-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements
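
# Illustrative sketch (not part of upstream code): computing the requirements
# a fresh repository would get, assuming `ui` is a configured ui object.
#
#     createopts = defaultcreateopts(ui)
#     reqs = newreporequirements(ui, createopts)
#     # with default config this would typically be:
#     # {b'revlogv1', b'store', b'fncache', b'dotencode',
#     #  b'generaldelta', b'sparserevlog'}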


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.exp-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
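
# Illustrative sketch (not part of upstream code): an extension teaching
# repository creation about its own option by wrapping this function. All
# names below other than wrapfunction are hypothetical.
#
#     def _filtermyopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myextension-opt', None)  # we know how to handle it
#         return unknown
#
#     extensions.wrapfunction(
#         localrepo, 'filterknowncreateopts', _filtermyopts
#     )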
3435
3449
3436
3450
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
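    # For illustration only (hypothetical caller, not executed here): a
    # share created from an existing repo object with a relative store
    # pointer would pass createopts like
    #     {b'sharedrepo': srcrepo, b'sharedrelative': True}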
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones.
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # Write the working copy ones.
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository is not a
    # shared one, write the store requirements. For a new shared
    # repository we don't need to write them, as they are already present
    # in the share source's store.
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

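As a usage sketch (the path and the ui construction are illustrative assumptions, not part of this change), creating a repository programmatically looks like:

    from mercurial import localrepo
    from mercurial import ui as uimod

    ui = uimod.ui.load()
    # b'lfs' is one of the recognized creation options documented above.
    localrepo.createrepository(ui, b'/tmp/newrepo', createopts={b'lfs': True})
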
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
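
A hedged sketch of the intended effect (`repo` stands in for whatever instance unshare just finished with):

    poisonrepository(repo)
    repo.close()    # still permitted, now a no-op
    repo.status()   # raises error.ProgrammingError at attribute lookup
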
@@ -1,455 +1,456 b''
setup

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > exp-share-safe = True
  > EOF

prepare source repo

  $ hg init source
  $ cd source
  $ cat .hg/requires
  exp-sharesafe
  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ echo a > a
  $ hg ci -Aqm "added a"
  $ echo b > b
  $ hg ci -Aqm "added b"

  $ HGEDITOR=cat hg config --shared
  abort: repository is not shared; can't use --shared
  [10]
  $ cd ..

Create a shared repo and check the requirements are shared and read correctly

  $ hg share source shared1
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd shared1
  $ cat .hg/requires
  exp-sharesafe
  shared

  $ hg debugrequirements -R ../source
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  shared
  sparserevlog
  store

  $ echo c > c
  $ hg ci -Aqm "added c"

Check that config of the source repository is also loaded

  $ hg showconfig ui.curses
  [1]

  $ echo "[ui]" >> ../source/.hg/hgrc
  $ echo "curses=true" >> ../source/.hg/hgrc

  $ hg showconfig ui.curses
  true

Test that extensions of the source repository are also loaded

  $ hg debugextensions
  share
  $ hg extdiff -p echo
  hg: unknown command 'extdiff'
  'extdiff' is provided by the following extension:

      extdiff       command to allow external programs to compare revisions

  (use 'hg help extensions' for information on enabling extensions)
  [255]

  $ echo "[extensions]" >> ../source/.hg/hgrc
  $ echo "extdiff=" >> ../source/.hg/hgrc

  $ hg debugextensions -R ../source
  extdiff
  share
  $ hg extdiff -R ../source -p echo

BROKEN: the command below will not work if the config of the shared source
is not loaded on dispatch, but debugextensions says that the extension is
loaded
  $ hg debugextensions
  extdiff
  share

  $ hg extdiff -p echo

However, local .hg/hgrc should override the config set by the share source

  $ echo "[ui]" >> .hg/hgrc
  $ echo "curses=false" >> .hg/hgrc

  $ hg showconfig ui.curses
  false

  $ HGEDITOR=cat hg config --shared
  [ui]
  curses=true
  [extensions]
  extdiff=

  $ HGEDITOR=cat hg config --local
  [ui]
  curses=false

Testing that hooks set in the source repository also run in the shared repo

  $ cd ../source
  $ cat <<EOF >> .hg/hgrc
  > [extensions]
  > hooklib=
  > [hooks]
  > pretxnchangegroup.reject_merge_commits = \
  > python:hgext.hooklib.reject_merge_commits.hook
  > EOF

  $ cd ..
  $ hg clone source cloned
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd cloned
  $ hg up 0
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo bar > bar
  $ hg ci -Aqm "added bar"
  $ hg merge
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m "merge commit"

  $ hg push ../source
  pushing to ../source
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

Test that if the share source config is untrusted, we don't read it

  $ cd ../shared1

  $ cat << EOF > $TESTTMP/untrusted.py
  > from mercurial import scmutil, util
  > def uisetup(ui):
  >     class untrustedui(ui.__class__):
  >         def _trusted(self, fp, f):
  >             if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
  >                 return False
  >             return super(untrustedui, self)._trusted(fp, f)
  >     ui.__class__ = untrustedui
  > EOF

  $ hg showconfig hooks
  hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook

  $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
  [1]

Update the source repository format and check that the shared repo works

  $ cd ../source

Disable zstd related tests because it's not present in the pure version
#if zstd
  $ echo "[format]" >> .hg/hgrc
  $ echo "revlog-compression=zstd" >> .hg/hgrc

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
     added: revlog-compression-zstd

  $ hg log -r .
  changeset:   1:5f6d8a4bf34a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added b

#endif
  $ echo "[format]" >> .hg/hgrc
  $ echo "use-persistent-nodemap=True" >> .hg/hgrc

  $ hg debugupgraderepo --run -q -R ../shared1
  abort: cannot upgrade repository; unsupported source requirement: shared
  [255]

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
     added: persistent-nodemap

  $ hg log -r .
  changeset:   1:5f6d8a4bf34a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added b


Shared one should work
  $ cd ../shared1
  $ hg log -r .
  changeset:   2:155349b645be
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added c

Testing that nonsharedrc is loaded for the source and not the shared repo

  $ cd ../source
  $ touch .hg/hgrc-not-shared
  $ echo "[ui]" >> .hg/hgrc-not-shared
  $ echo "traceback=true" >> .hg/hgrc-not-shared

  $ hg showconfig ui.traceback
  true

  $ HGEDITOR=cat hg config --non-shared
  [ui]
  traceback=true

  $ cd ../shared1
  $ hg showconfig ui.traceback
  [1]

Unsharing works

  $ hg unshare

Test that the source config is added to the shared one after unshare, and that
the config of the current repo is still respected over the config which came
from the source
  $ cd ../cloned
  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]
  $ hg showconfig ui.curses -R ../shared1
  false

  $ cd ../

299 =================================================
299 =================================================
300
300
301 $ hg init non-share-safe --config format.exp-share-safe=false
301 $ hg init non-share-safe --config format.exp-share-safe=false
302 $ cd non-share-safe
302 $ cd non-share-safe
303 $ hg debugrequirements
303 $ hg debugrequirements
304 dotencode
304 dotencode
305 fncache
305 fncache
306 generaldelta
306 generaldelta
307 revlogv1
307 revlogv1
308 sparserevlog
308 sparserevlog
309 store
309 store
310 $ echo foo > foo
310 $ echo foo > foo
311 $ hg ci -Aqm 'added foo'
311 $ hg ci -Aqm 'added foo'
312 $ echo bar > bar
312 $ echo bar > bar
313 $ hg ci -Aqm 'added bar'
313 $ hg ci -Aqm 'added bar'
314
314
315 Create a share before upgrading
315 Create a share before upgrading
316
316
317 $ cd ..
317 $ cd ..
318 $ hg share non-share-safe nss-share
318 $ hg share non-share-safe nss-share
319 updating working directory
319 updating working directory
320 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
320 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 $ hg debugrequirements -R nss-share
321 $ hg debugrequirements -R nss-share
322 dotencode
322 dotencode
323 fncache
323 fncache
324 generaldelta
324 generaldelta
325 revlogv1
325 revlogv1
326 shared
326 shared
327 sparserevlog
327 sparserevlog
328 store
328 store
329 $ cd non-share-safe
329 $ cd non-share-safe
330
330
331 Upgrade
331 Upgrade
332
332
333 $ hg debugupgraderepo -q
333 $ hg debugupgraderepo -q
334 requirements
334 requirements
335 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
335 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
336 added: exp-sharesafe
336 added: exp-sharesafe
337
337
338 $ hg debugupgraderepo --run -q
338 $ hg debugupgraderepo --run -q
339 upgrade will perform the following actions:
339 upgrade will perform the following actions:
340
340
341 requirements
341 requirements
342 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
342 preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
343 added: exp-sharesafe
343 added: exp-sharesafe
344
344
345 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
345 repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode New shares will be created in safe mode.
346
346
347 $ hg debugrequirements
347 $ hg debugrequirements
348 dotencode
348 dotencode
349 exp-sharesafe
349 exp-sharesafe
350 fncache
350 fncache
351 generaldelta
351 generaldelta
352 revlogv1
352 revlogv1
353 sparserevlog
353 sparserevlog
354 store
354 store
355
355
356 $ cat .hg/requires
356 $ cat .hg/requires
357 exp-sharesafe
357 exp-sharesafe
358
358
359 $ cat .hg/store/requires
359 $ cat .hg/store/requires
360 dotencode
360 dotencode
361 fncache
361 fncache
362 generaldelta
362 generaldelta
363 revlogv1
363 revlogv1
364 sparserevlog
364 sparserevlog
365 store
365 store
366
366
367 $ hg log -GT "{node}: {desc}\n"
367 $ hg log -GT "{node}: {desc}\n"
368 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
368 @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
369 |
369 |
370 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
370 o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo
371
371
372
372
Make sure existing shares still work

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  warning: source repository supports share-safe functionality. Reshare to upgrade.
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Create a safe share from the upgraded one

  $ cd ..
  $ hg share non-share-safe ss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd ss-share
  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ cd ../non-share-safe

Test that downgrading works too

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > exp-share-safe = False
  > EOF

  $ hg debugupgraderepo -q
  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     removed: exp-sharesafe

  $ hg debugupgraderepo -q --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     removed: exp-sharesafe

  repository downgraded to not use share safe mode, existing shares will not work and needs to be reshared.

  $ hg debugrequirements
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ test -f .hg/store/requires
  [1]

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Make sure existing shares still work

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ hg unshare -R ../nss-share

  $ hg log -GT "{node}: {desc}\n" -R ../ss-share
  abort: share source does not support exp-sharesafe requirement
  [255]