upgrade: add support to downgrade share safe mode...

Pulkit Goyal
r46618:c6a1fa42 default
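
Context for this change: in share-safe mode a repository's requirements are split between `.hg/requires` (working-copy requirements) and `.hg/store/requires` (store requirements), so shares can follow the configuration of their source. Below is a minimal sketch of reading that split layout, assuming a repo already in share-safe mode; the helper names are illustrative, and the real logic is `_readrequires()` in the diff that follows:

    import os

    def read_requirements(hgdir):
        """Collect requirements the way _readrequires() does, for illustration."""
        def _read(path):
            try:
                with open(path, 'rb') as fh:
                    return set(fh.read().splitlines())
            except FileNotFoundError:
                return set()

        working = _read(os.path.join(hgdir, 'requires'))
        store = set()
        if b'exp-sharesafe' in working:
            # share-safe repos keep store requirements in a separate file
            store = _read(os.path.join(hgdir, 'store', 'requires'))
        return working | store
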
@@ -1,3566 +1,3576 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


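# Usage sketch (illustrative, not part of this change): localrepository
# declares cached properties with the decorators above so that values are
# invalidated when the backing file changes, e.g.:
#
#     @storecache(b'00changelog.i')
#     def changelog(self):
#         ...
#
# repofilecache watches paths relative to .hg/ (the 'plain' location),
# while storecache watches paths relative to .hg/store.
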
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


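# Usage sketch (illustrative): decorating a method with unfilteredmethod
# forces it to run against the unfiltered repo even when invoked through
# a filtered repoview, e.g.:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # must see revisions hidden by the current filter
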
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


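# Usage sketch for the executor protocol above (names mirror the peer API):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()
#
# For this local executor the future is already resolved when callcommand()
# returns; sendcommands() only flips a flag.
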
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


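# For illustration: a localpeer is normally obtained via repo.peer() (defined
# further down in this file), letting local repositories be driven through
# the same interface as remote peers.
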
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


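# Registration sketch (how an extension typically hooks in; the requirement
# name below is illustrative):
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-my-feature'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
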
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is a vfs pointing at .hg/ of the current repo (the shared one)
    requirements is the set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements


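# Call pattern used below: _readrequires() runs first on the .hg/ vfs with
# allowmissing=True (repos predating Mercurial 0.9.2 have no requires file)
# and, for share-safe repos, again on the store vfs with allowmissing=False,
# since a missing .hg/store/requires there would indicate a broken repository.
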
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, a vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    if shared:
        sharedvfs = _getsharedvfs(hgvfs, requirements)

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; see checkrequirementscompat() for that.
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
+
+        if (
+            shared
+            and requirementsmod.SHARESAFE_REQUIREMENT
+            not in _readrequires(sharedvfs, True)
+        ):
+            raise error.Abort(
+                _(b"share source does not support exp-sharesafe requirement")
+            )
+
        if shared:
            # This is a shared repo
            storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
        else:
            storevfs = vfsmod.vfs(hgvfs.join(b'store'))

        requirements |= _readrequires(storevfs, False)

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


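# Usage sketch: this factory is normally reached through hg.repository()
# rather than called directly, e.g.:
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
#
# which resolves to makelocalrepository() for local paths.
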
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret


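# Precedence implied by the read order above: the shared source hgrc is read
# first, then the local .hg/hgrc, then .hg/hgrc-not-shared, so later files
# win for conflicting settings.
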
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


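# Decision summary for makestore() above:
#
#     'store' + 'fncache' in requirements -> fncachestore (honoring 'dotencode')
#     'store' only                        -> encodedstore
#     neither                             -> basicstore (legacy layout)
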
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        b'revlogv1' in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata
970 options[b'side-data'] = sidedata
961
971
962 maxchainlen = None
972 maxchainlen = None
963 if sparserevlog:
973 if sparserevlog:
964 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
974 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
965 # experimental config: format.maxchainlen
975 # experimental config: format.maxchainlen
966 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
976 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
967 if maxchainlen is not None:
977 if maxchainlen is not None:
968 options[b'maxchainlen'] = maxchainlen
978 options[b'maxchainlen'] = maxchainlen
969
979
970 for r in requirements:
980 for r in requirements:
971 # we allow multiple compression engine requirement to co-exist because
981 # we allow multiple compression engine requirement to co-exist because
972 # strickly speaking, revlog seems to support mixed compression style.
982 # strickly speaking, revlog seems to support mixed compression style.
973 #
983 #
974 # The compression used for new entries will be "the last one"
984 # The compression used for new entries will be "the last one"
975 prefix = r.startswith
985 prefix = r.startswith
976 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
986 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
977 options[b'compengine'] = r.split(b'-', 2)[2]
987 options[b'compengine'] = r.split(b'-', 2)[2]
978
988
979 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
989 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
980 if options[b'zlib.level'] is not None:
990 if options[b'zlib.level'] is not None:
981 if not (0 <= options[b'zlib.level'] <= 9):
991 if not (0 <= options[b'zlib.level'] <= 9):
982 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
992 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
983 raise error.Abort(msg % options[b'zlib.level'])
993 raise error.Abort(msg % options[b'zlib.level'])
984 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
994 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
985 if options[b'zstd.level'] is not None:
995 if options[b'zstd.level'] is not None:
986 if not (0 <= options[b'zstd.level'] <= 22):
996 if not (0 <= options[b'zstd.level'] <= 22):
987 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
997 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
988 raise error.Abort(msg % options[b'zstd.level'])
998 raise error.Abort(msg % options[b'zstd.level'])
989
999
990 if requirementsmod.NARROW_REQUIREMENT in requirements:
1000 if requirementsmod.NARROW_REQUIREMENT in requirements:
991 options[b'enableellipsis'] = True
1001 options[b'enableellipsis'] = True
992
1002
993 if ui.configbool(b'experimental', b'rust.index'):
1003 if ui.configbool(b'experimental', b'rust.index'):
994 options[b'rust.index'] = True
1004 options[b'rust.index'] = True
995 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1005 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
996 options[b'persistent-nodemap'] = True
1006 options[b'persistent-nodemap'] = True
997 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
1007 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
998 options[b'persistent-nodemap.mmap'] = True
1008 options[b'persistent-nodemap.mmap'] = True
999 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1009 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
1000 options[b'persistent-nodemap.mode'] = epnm
1010 options[b'persistent-nodemap.mode'] = epnm
1001 if ui.configbool(b'devel', b'persistent-nodemap'):
1011 if ui.configbool(b'devel', b'persistent-nodemap'):
1002 options[b'devel-force-nodemap'] = True
1012 options[b'devel-force-nodemap'] = True
1003
1013
1004 return options
1014 return options
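

# Illustrative sketch (not part of upstream Mercurial): the loop above derives
# the compression engine from requirement names. The hypothetical helper below
# re-states that derivation in isolation, assuming the same
# ``revlog-compression-*`` / ``exp-compression-*`` naming scheme and a zlib
# default.
def _example_compression_engine(requirements, default=b'zlib'):
    """Return the compression engine implied by a set of requirements."""
    engine = default
    for r in requirements:
        if r.startswith(b'revlog-compression-') or r.startswith(
            b'exp-compression-'
        ):
            # b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
            engine = r.split(b'-', 2)[2]
    return engine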


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
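

# Illustrative sketch (not part of upstream Mercurial): a hypothetical check
# showing which storage class ``makefilestorage`` picks for a given
# requirements set; only the narrow requirement changes the outcome.
def _example_storage_class_name(requirements):
    """Return the name of the file storage class implied by requirements."""
    features = set()  # makefilestorage mutates this set as a side effect
    cls = makefilestorage(requirements, features)
    return cls.__name__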


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
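

# Illustrative sketch (not part of upstream Mercurial): how an iterative type
# derivation like the one ``makelocalrepository()`` performs over
# ``REPO_INTERFACES`` can be expressed. The helper name is hypothetical, only
# ``requirements``/``features`` are threaded through here, and the real
# implementation also verifies interface conformance.
def _example_derive_repo_type(requirements, features):
    """Compose a repository type from the registered factory functions."""
    bases = []
    for iface, fn in REPO_INTERFACES:
        # call the lambda to get the (possibly wrapped) module-level factory
        bases.append(fn()(requirements=requirements, features=features))
    # derive a single class whose MRO covers every main/storage base
    return type('derivedrepo', tuple(bases), {})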


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    #  - manifestv2: An experimental new manifest format that allowed
    #    for stem compression of long paths. Experiment ended up not
    #    being successful (repository sizes went up due to worse delta
    #    chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SIDEDATA_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
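
    # Illustrative note (not part of upstream Mercurial): an extension that
    # maintains its own lock-free state file could register it at setup time,
    # e.g.:
    #
    #     def extsetup(ui):
    #         localrepo.localrepository._wlockfreeprefix.add(b'myext.state')
    #
    # ``myext.state`` is a hypothetical file name used only for illustration.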

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

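    # Illustrative note (not part of upstream Mercurial): the supported way to
    # obtain one of these objects is through the higher-level helpers, e.g.:
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #
    # which ends up calling ``makelocalrepository()`` and this constructor.
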
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected it by mistake since
        # it panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
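
    # Illustrative note (not part of upstream Mercurial): view selection is
    # always absolute, never stacked. A sketch of what that means:
    #
    #     served = repo.filtered(b'served')      # hides secret/obsolete revs
    #     visible = served.filtered(b'visible')  # "visible" view, not both
    #
    # The second call filters the *unfiltered* repo with the "visible" view;
    # it does not further restrict ``served``.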

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during a read is not great, but it becomes worse
        # when it happens during a write, because the bookmarks pointing at
        # the "unknown" nodes will be dropped for good. However, writes happen
        # within locks. This locking makes it possible to have a race free
        # consistent read. For this purpose, data read from disk before
        # locking is "invalidated" right after the locks are taken. These
        # invalidations are "light"; the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved. Adding
        # `00changelog.i` to the list of tracked files is not enough, because
        # at the time we build the content for `_bookmarks` in (4), the
        # changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) we force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
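
    # Illustrative note (not part of upstream Mercurial): how the three
    # narrowmatch shapes combine, assuming a narrowspec of ``path:src``:
    #
    #     m = repo.narrowmatch()                # matches only files in src/
    #     m = repo.narrowmatch(usermatch)       # usermatch AND narrowspec
    #     m = repo.narrowmatch(usermatch, includeexact=True)
    #     # as above, but paths usermatch names exactly are kept even when
    #     # they fall outside src/, so callers can warn about them later
    #
    # ``usermatch`` stands for any matcher built from user-supplied patterns.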

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
1731
1741
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

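    # Illustrative usage sketch for ``revs()`` (added for exposition, not
    # part of the original module): %-formatting escapes caller-provided
    # values safely. The ``repo`` object and the path below are hypothetical.
    #
    #   for r in repo.revs(b'ancestors(%d) and file(%s)', 42, b'path/to/f'):
    #       ...  # r is an integer revision number
    #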
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

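    # Illustrative sketch for ``anyrevs()`` (added for exposition; ``repo``
    # and the alias names are hypothetical): expand user aliases while
    # overriding one of them locally.
    #
    #   revs = repo.anyrevs(
    #       [b'release() and public()'],
    #       user=True,
    #       localalias={b'release': b'tag("re:^v")'},
    #   )
    #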
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

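    # Illustrative sketch for ``hook()`` (added for exposition): fire a
    # hypothetical custom hook; with throw=True a failing hook aborts the
    # caller. The hook name and ``somenode`` are made up for the example.
    #
    #   repo.hook(b'myextension-post-op', throw=True, node=hex(somenode))
    #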
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

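    # Illustrative sketch for ``branchtip()`` (added for exposition):
    #
    #   tip = repo.branchtip(b'default')  # raises RepoLookupError if unknown
    #   tip = repo.branchtip(b'maybe', ignoremissing=True)  # None if unknown
    #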
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

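    # Illustrative sketch for ``filectx()`` (added for exposition; the file
    # name is hypothetical): look up a file as of a given changeset.
    #
    #   fctx = repo.filectx(b'README', changeid=b'.')
    #   data = fctx.data()
    #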
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

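    # The patterns compiled above come from the [encode]/[decode] sections of
    # the user's configuration. A configuration sketch (adapted from the
    # hgrc documentation; treat the exact commands as illustrative):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
    #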
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

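    # Illustrative sketch for ``wwrite()`` (added for exposition): flags is a
    # bytestring where b'l' requests a symlink and b'x' the executable bit.
    #
    #   repo.wwrite(b'script.sh', b'#!/bin/sh\n', b'x')  # executable file
    #   repo.wwrite(b'link', b'target', b'l')            # symlink
    #   repo.wwrite(b'plain.txt', b'data', b'')          # regular file
    #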
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

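    # Illustrative call pattern for the transaction machinery below (added
    # for exposition): transactions require the store lock, and opening one
    # while another is running returns a nested transaction via tr.nest().
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutations are journaled; an exception aborts them
    #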
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing
                # the transaction if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

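    # For exposition: with the ``b"%d\n%s\n"`` format used above, a
    # repository containing 42 revisions that opens a transaction named
    # b'commit' ends up with a ``journal.desc`` of:
    #
    #   42
    #   commit
    #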
    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

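    # Illustrative note, not from the original source: the 'full=True' path
    # above is what a cache-warming command such as 'hg debugupdatecaches'
    # relies on to pre-warm every cache, including the lazily-written ones,
    # in a single pass.
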
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

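    # Illustrative note, not from the original source: the timeouts read
    # above come from user configuration, e.g.
    #
    #     [ui]
    #     timeout = 600       # seconds to wait for a lock before aborting
    #     timeout.warn = 10   # seconds to wait before warning about the wait
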
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

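    # Illustrative sketch, not part of the original file: deferring work
    # until every lock is released; 'notify' is a hypothetical callback.
    #
    #     def notify(success):
    #         if success:
    #             repo.ui.status(b'all repository locks released\n')
    #
    #     repo._afterlock(notify)
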
    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l

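    # Illustrative sketch, not part of the original file: acquiring both
    # locks in the order the docstrings above require, avoiding the
    # dead-lock hazard.
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the working directory and the store
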
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # (the temporary commit may have been stripped before the hook runs)
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

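    # Illustrative sketch, not part of the original file: registering a
    # one-shot post-dirstate-status callback; 'fixup' is a hypothetical
    # function.
    #
    #     def fixup(wctx, status):
    #         # runs under wlock; access the dirstate via wctx.repo().dirstate
    #         ...
    #
    #     repo.addpostdsstatus(fixup)
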
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

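    # Illustrative note, not from the original source: for each (top, bottom)
    # pair, the loop above walks first parents and records nodes whose
    # distance from 'top' doubles at each sample (1, 2, 4, 8, ...), so a
    # linear history of n changesets yields only O(log n) sample points
    # between the two nodes.
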
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

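    # Illustrative sketch, not part of the original file: the prepushkey hook
    # consulted above can veto a pushkey operation via configuration, e.g.
    #
    #     [hooks]
    #     prepushkey = python:myhooks.checkkey
    #
    # 'myhooks.checkkey' is a hypothetical in-process hook; raising or
    # returning a truthy value makes the hook fail, aborting the key push.
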
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


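# Illustrative note, not from the original source: undoname() maps a journal
# file to its undo counterpart, e.g.
#
#     undoname(b'.hg/store/journal') -> b'.hg/store/undo'
#     undoname(b'.hg/store/journal.phaseroots')
#         -> b'.hg/store/undo.phaseroots'
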
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'exp-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements


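# Illustrative note, not from the original source: a sample hgrc fragment and
# the requirements newreporequirements() derives from it (values shown are
# examples, not defaults):
#
#     [format]
#     usestore = yes             -> 'store'
#     usefncache = yes           -> 'fncache' (only when usestore is on)
#     dotencode = yes            -> 'dotencode' (only when usefncache is on)
#     revlog-compression = zstd  -> 'revlog-compression-zstd'
#     sparse-revlog = yes        -> SPARSEREVLOG_REQUIREMENT
#     exp-share-safe = yes       -> SHARESAFE_REQUIREMENT
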
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.exp-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()
3498
3508
3499 hgvfs.makedir(notindexed=True)
3509 hgvfs.makedir(notindexed=True)
3500 if b'sharedrepo' not in createopts:
3510 if b'sharedrepo' not in createopts:
3501 hgvfs.mkdir(b'cache')
3511 hgvfs.mkdir(b'cache')
3502 hgvfs.mkdir(b'wcache')
3512 hgvfs.mkdir(b'wcache')
3503
3513
3504 if b'store' in requirements and b'sharedrepo' not in createopts:
3514 if b'store' in requirements and b'sharedrepo' not in createopts:
3505 hgvfs.mkdir(b'store')
3515 hgvfs.mkdir(b'store')
3506
3516
3507 # We create an invalid changelog outside the store so very old
3517 # We create an invalid changelog outside the store so very old
3508 # Mercurial versions (which didn't know about the requirements
3518 # Mercurial versions (which didn't know about the requirements
3509 # file) encounter an error on reading the changelog. This
3519 # file) encounter an error on reading the changelog. This
3510 # effectively locks out old clients and prevents them from
3520 # effectively locks out old clients and prevents them from
3511 # mucking with a repo in an unknown format.
3521 # mucking with a repo in an unknown format.
3512 #
3522 #
3513 # The revlog header has version 2, which won't be recognized by
3523 # The revlog header has version 2, which won't be recognized by
3514 # such old clients.
3524 # such old clients.
3515 hgvfs.append(
3525 hgvfs.append(
3516 b'00changelog.i',
3526 b'00changelog.i',
3517 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3527 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3518 b'layout',
3528 b'layout',
3519 )
3529 )
3520
3530
3521 # Filter the requirements into working copy and store ones
3531 # Filter the requirements into working copy and store ones
3522 wcreq, storereq = scmutil.filterrequirements(requirements)
3532 wcreq, storereq = scmutil.filterrequirements(requirements)
3523 # write working copy ones
3533 # write working copy ones
3524 scmutil.writerequires(hgvfs, wcreq)
3534 scmutil.writerequires(hgvfs, wcreq)
3525 # If there are store requirements and the current repository
3535 # If there are store requirements and the current repository
3526 # is not a shared one, write the store requirements
3536 # is not a shared one, write the store requirements
3527 # For a new shared repository, we don't need to write the store
3537 # For a new shared repository, we don't need to write the store
3528 # requirements as they are already present in the store's requires
3538 # requirements as they are already present in the store's requires
3529 if storereq and b'sharedrepo' not in createopts:
3539 if storereq and b'sharedrepo' not in createopts:
3530 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3540 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3531 scmutil.writerequires(storevfs, storereq)
3541 scmutil.writerequires(storevfs, storereq)
3532
3542
3533 # Write out file telling readers where to find the shared store.
3543 # Write out file telling readers where to find the shared store.
3534 if b'sharedrepo' in createopts:
3544 if b'sharedrepo' in createopts:
3535 hgvfs.write(b'sharedpath', sharedpath)
3545 hgvfs.write(b'sharedpath', sharedpath)
3536
3546
3537 if createopts.get(b'shareditems'):
3547 if createopts.get(b'shareditems'):
3538 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3548 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3539 hgvfs.write(b'shared', shared)
3549 hgvfs.write(b'shared', shared)
3540
3550
3541
3551
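
A minimal usage sketch for the function above, assuming a loaded ui object; the path and options are illustrative.

from mercurial import localrepo, ui as uimod

myui = uimod.ui.load()
localrepo.createrepository(
    myui, b'/tmp/newrepo', createopts={b'lfs': True}
)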
3542 def poisonrepository(repo):
3552 def poisonrepository(repo):
3543 """Poison a repository instance so it can no longer be used."""
3553 """Poison a repository instance so it can no longer be used."""
3544 # Perform any cleanup on the instance.
3554 # Perform any cleanup on the instance.
3545 repo.close()
3555 repo.close()
3546
3556
3547 # Our strategy is to replace the type of the object with one that
3557 # Our strategy is to replace the type of the object with one that
3548 # makes all attribute lookups raise an error.
3558 # makes all attribute lookups raise an error.
3549 #
3559 #
3550 # But we have to allow the close() method because some constructors
3560 # But we have to allow the close() method because some constructors
3551 # of repos call close() on repo references.
3561 # of repos call close() on repo references.
3552 class poisonedrepository(object):
3562 class poisonedrepository(object):
3553 def __getattribute__(self, item):
3563 def __getattribute__(self, item):
3554 if item == 'close':
3564 if item == 'close':
3555 return object.__getattribute__(self, item)
3565 return object.__getattribute__(self, item)
3556
3566
3557 raise error.ProgrammingError(
3567 raise error.ProgrammingError(
3558 b'repo instances should not be used after unshare'
3568 b'repo instances should not be used after unshare'
3559 )
3569 )
3560
3570
3561 def close(self):
3571 def close(self):
3562 pass
3572 pass
3563
3573
3564 # We may have a repoview, which intercepts __setattr__. So be sure
3574 # We may have a repoview, which intercepts __setattr__. So be sure
3565 # we operate at the lowest level possible.
3575 # we operate at the lowest level possible.
3566 object.__setattr__(repo, '__class__', poisonedrepository)
3576 object.__setattr__(repo, '__class__', poisonedrepository)
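
The class-swapping trick above can be shown with a self-contained sketch (plain Python, outside Mercurial): after the swap, every attribute access except close() raises.

class _Poisoned(object):
    def __getattribute__(self, item):
        if item == 'close':
            return object.__getattribute__(self, item)
        raise RuntimeError('object used after poisoning')

    def close(self):
        pass

class Thing(object):
    def close(self):
        print('closing')

t = Thing()
object.__setattr__(t, '__class__', _Poisoned)
t.close()  # still allowed
# any other attribute access on t now raises RuntimeError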
@@ -1,1472 +1,1481 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 hg,
18 hg,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 requirements,
23 requirements,
24 revlog,
24 revlog,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29
29
30 from .utils import compression
30 from .utils import compression
31
31
32 # list of requirements that request a clone of all revlogs if added/removed
32 # list of requirements that request a clone of all revlogs if added/removed
33 RECLONES_REQUIREMENTS = {
33 RECLONES_REQUIREMENTS = {
34 b'generaldelta',
34 b'generaldelta',
35 requirements.SPARSEREVLOG_REQUIREMENT,
35 requirements.SPARSEREVLOG_REQUIREMENT,
36 }
36 }
37
37
38
38
39 def requiredsourcerequirements(repo):
39 def requiredsourcerequirements(repo):
40 """Obtain requirements required to be present to upgrade a repo.
40 """Obtain requirements required to be present to upgrade a repo.
41
41
42 An upgrade will not be allowed if the repository doesn't have the
42 An upgrade will not be allowed if the repository doesn't have the
43 requirements returned by this function.
43 requirements returned by this function.
44 """
44 """
45 return {
45 return {
46 # Introduced in Mercurial 0.9.2.
46 # Introduced in Mercurial 0.9.2.
47 b'revlogv1',
47 b'revlogv1',
48 # Introduced in Mercurial 0.9.2.
48 # Introduced in Mercurial 0.9.2.
49 b'store',
49 b'store',
50 }
50 }
51
51
52
52
53 def blocksourcerequirements(repo):
53 def blocksourcerequirements(repo):
54 """Obtain requirements that will prevent an upgrade from occurring.
54 """Obtain requirements that will prevent an upgrade from occurring.
55
55
56 An upgrade cannot be performed if the source repository contains a
56 An upgrade cannot be performed if the source repository contains a
57 requirement in the returned set.
57 requirement in the returned set.
58 """
58 """
59 return {
59 return {
60 # The upgrade code does not yet support these experimental features.
60 # The upgrade code does not yet support these experimental features.
61 # This is an artificial limitation.
61 # This is an artificial limitation.
62 requirements.TREEMANIFEST_REQUIREMENT,
62 requirements.TREEMANIFEST_REQUIREMENT,
63 # This was a precursor to generaldelta and was never enabled by default.
63 # This was a precursor to generaldelta and was never enabled by default.
64 # It should (hopefully) not exist in the wild.
64 # It should (hopefully) not exist in the wild.
65 b'parentdelta',
65 b'parentdelta',
66 # Upgrade should operate on the actual store, not the shared link.
66 # Upgrade should operate on the actual store, not the shared link.
67 requirements.SHARED_REQUIREMENT,
67 requirements.SHARED_REQUIREMENT,
68 }
68 }
69
69
70
70
71 def supportremovedrequirements(repo):
71 def supportremovedrequirements(repo):
72 """Obtain requirements that can be removed during an upgrade.
72 """Obtain requirements that can be removed during an upgrade.
73
73
74 If an upgrade were to create a repository that dropped a requirement,
74 If an upgrade were to create a repository that dropped a requirement,
75 the dropped requirement must appear in the returned set for the upgrade
75 the dropped requirement must appear in the returned set for the upgrade
76 to be allowed.
76 to be allowed.
77 """
77 """
78 supported = {
78 supported = {
79 requirements.SPARSEREVLOG_REQUIREMENT,
79 requirements.SPARSEREVLOG_REQUIREMENT,
80 requirements.SIDEDATA_REQUIREMENT,
80 requirements.SIDEDATA_REQUIREMENT,
81 requirements.COPIESSDC_REQUIREMENT,
81 requirements.COPIESSDC_REQUIREMENT,
82 requirements.NODEMAP_REQUIREMENT,
82 requirements.NODEMAP_REQUIREMENT,
83 requirements.SHARESAFE_REQUIREMENT,
83 }
84 }
84 for name in compression.compengines:
85 for name in compression.compengines:
85 engine = compression.compengines[name]
86 engine = compression.compengines[name]
86 if engine.available() and engine.revlogheader():
87 if engine.available() and engine.revlogheader():
87 supported.add(b'exp-compression-%s' % name)
88 supported.add(b'exp-compression-%s' % name)
88 if engine.name() == b'zstd':
89 if engine.name() == b'zstd':
89 supported.add(b'revlog-compression-zstd')
90 supported.add(b'revlog-compression-zstd')
90 return supported
91 return supported
91
92
92
93
93 def supporteddestrequirements(repo):
94 def supporteddestrequirements(repo):
94 """Obtain requirements that upgrade supports in the destination.
95 """Obtain requirements that upgrade supports in the destination.
95
96
96 If the result of the upgrade would create requirements not in this set,
97 If the result of the upgrade would create requirements not in this set,
97 the upgrade is disallowed.
98 the upgrade is disallowed.
98
99
99 Extensions should monkeypatch this to add their custom requirements.
100 Extensions should monkeypatch this to add their custom requirements.
100 """
101 """
101 supported = {
102 supported = {
102 b'dotencode',
103 b'dotencode',
103 b'fncache',
104 b'fncache',
104 b'generaldelta',
105 b'generaldelta',
105 b'revlogv1',
106 b'revlogv1',
106 b'store',
107 b'store',
107 requirements.SPARSEREVLOG_REQUIREMENT,
108 requirements.SPARSEREVLOG_REQUIREMENT,
108 requirements.SIDEDATA_REQUIREMENT,
109 requirements.SIDEDATA_REQUIREMENT,
109 requirements.COPIESSDC_REQUIREMENT,
110 requirements.COPIESSDC_REQUIREMENT,
110 requirements.NODEMAP_REQUIREMENT,
111 requirements.NODEMAP_REQUIREMENT,
111 requirements.SHARESAFE_REQUIREMENT,
112 requirements.SHARESAFE_REQUIREMENT,
112 }
113 }
113 for name in compression.compengines:
114 for name in compression.compengines:
114 engine = compression.compengines[name]
115 engine = compression.compengines[name]
115 if engine.available() and engine.revlogheader():
116 if engine.available() and engine.revlogheader():
116 supported.add(b'exp-compression-%s' % name)
117 supported.add(b'exp-compression-%s' % name)
117 if engine.name() == b'zstd':
118 if engine.name() == b'zstd':
118 supported.add(b'revlog-compression-zstd')
119 supported.add(b'revlog-compression-zstd')
119 return supported
120 return supported
120
121
121
122
122 def allowednewrequirements(repo):
123 def allowednewrequirements(repo):
123 """Obtain requirements that can be added to a repository during upgrade.
124 """Obtain requirements that can be added to a repository during upgrade.
124
125
125 This is used to disallow proposed requirements from being added when
126 This is used to disallow proposed requirements from being added when
126 they weren't present before.
127 they weren't present before.
127
128
128 We use a list of allowed requirement additions instead of a list of known
129 We use a list of allowed requirement additions instead of a list of known
129 bad additions because the whitelist approach is safer and will prevent
130 bad additions because the whitelist approach is safer and will prevent
130 future, unknown requirements from accidentally being added.
131 future, unknown requirements from accidentally being added.
131 """
132 """
132 supported = {
133 supported = {
133 b'dotencode',
134 b'dotencode',
134 b'fncache',
135 b'fncache',
135 b'generaldelta',
136 b'generaldelta',
136 requirements.SPARSEREVLOG_REQUIREMENT,
137 requirements.SPARSEREVLOG_REQUIREMENT,
137 requirements.SIDEDATA_REQUIREMENT,
138 requirements.SIDEDATA_REQUIREMENT,
138 requirements.COPIESSDC_REQUIREMENT,
139 requirements.COPIESSDC_REQUIREMENT,
139 requirements.NODEMAP_REQUIREMENT,
140 requirements.NODEMAP_REQUIREMENT,
140 requirements.SHARESAFE_REQUIREMENT,
141 requirements.SHARESAFE_REQUIREMENT,
141 }
142 }
142 for name in compression.compengines:
143 for name in compression.compengines:
143 engine = compression.compengines[name]
144 engine = compression.compengines[name]
144 if engine.available() and engine.revlogheader():
145 if engine.available() and engine.revlogheader():
145 supported.add(b'exp-compression-%s' % name)
146 supported.add(b'exp-compression-%s' % name)
146 if engine.name() == b'zstd':
147 if engine.name() == b'zstd':
147 supported.add(b'revlog-compression-zstd')
148 supported.add(b'revlog-compression-zstd')
148 return supported
149 return supported
149
150
150
151
151 def preservedrequirements(repo):
152 def preservedrequirements(repo):
152 return set()
153 return set()
153
154
154
155
155 DEFICIENCY = b'deficiency'
156 DEFICIENCY = b'deficiency'
156 OPTIMISATION = b'optimization'
157 OPTIMISATION = b'optimization'
157
158
158
159
159 class improvement(object):
160 class improvement(object):
160 """Represents an improvement that can be made as part of an upgrade.
161 """Represents an improvement that can be made as part of an upgrade.
161
162
162 The following attributes are defined on each instance:
163 The following attributes are defined on each instance:
163
164
164 name
165 name
165 Machine-readable string uniquely identifying this improvement. It
166 Machine-readable string uniquely identifying this improvement. It
166 will be mapped to an action later in the upgrade process.
167 will be mapped to an action later in the upgrade process.
167
168
168 type
169 type
169 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
170 Either ``DEFICIENCY`` or ``OPTIMISATION``. A deficiency is an obvious
170 problem. An optimization is an action (sometimes optional) that
171 problem. An optimization is an action (sometimes optional) that
171 can be taken to further improve the state of the repository.
172 can be taken to further improve the state of the repository.
172
173
173 description
174 description
174 Message intended for humans explaining the improvement in more detail,
175 Message intended for humans explaining the improvement in more detail,
175 including the implications of it. For ``DEFICIENCY`` types, should be
176 including the implications of it. For ``DEFICIENCY`` types, should be
176 worded in the present tense. For ``OPTIMISATION`` types, should be
177 worded in the present tense. For ``OPTIMISATION`` types, should be
177 worded in the future tense.
178 worded in the future tense.
178
179
179 upgrademessage
180 upgrademessage
180 Message intended for humans explaining what an upgrade addressing this
181 Message intended for humans explaining what an upgrade addressing this
181 issue will do. Should be worded in the future tense.
182 issue will do. Should be worded in the future tense.
182 """
183 """
183
184
184 def __init__(self, name, type, description, upgrademessage):
185 def __init__(self, name, type, description, upgrademessage):
185 self.name = name
186 self.name = name
186 self.type = type
187 self.type = type
187 self.description = description
188 self.description = description
188 self.upgrademessage = upgrademessage
189 self.upgrademessage = upgrademessage
189
190
190 def __eq__(self, other):
191 def __eq__(self, other):
191 if not isinstance(other, improvement):
192 if not isinstance(other, improvement):
192 # This is what Python tells us to do
193 # This is what Python tells us to do
193 return NotImplemented
194 return NotImplemented
194 return self.name == other.name
195 return self.name == other.name
195
196
196 def __ne__(self, other):
197 def __ne__(self, other):
197 return not (self == other)
198 return not (self == other)
198
199
199 def __hash__(self):
200 def __hash__(self):
200 return hash(self.name)
201 return hash(self.name)
201
202
202
203
203 allformatvariant = []
204 allformatvariant = []
204
205
205
206
206 def registerformatvariant(cls):
207 def registerformatvariant(cls):
207 allformatvariant.append(cls)
208 allformatvariant.append(cls)
208 return cls
209 return cls
209
210
210
211
211 class formatvariant(improvement):
212 class formatvariant(improvement):
212 """an improvement subclass dedicated to repository format"""
213 """an improvement subclass dedicated to repository format"""
213
214
214 type = DEFICIENCY
215 type = DEFICIENCY
215 ### The following attributes should be defined for each class:
216 ### The following attributes should be defined for each class:
216
217
217 # machine-readable string uniquely identifying this improvement. it will be
218 # machine-readable string uniquely identifying this improvement. it will be
218 # mapped to an action later in the upgrade process.
219 # mapped to an action later in the upgrade process.
219 name = None
220 name = None
220
221
221 # message intended for humans explaining the improvement in more detail,
222 # message intended for humans explaining the improvement in more detail,
222 # including the implications of it. ``DEFICIENCY`` types should be worded
223 # including the implications of it. ``DEFICIENCY`` types should be worded
223 # in the present tense.
224 # in the present tense.
224 description = None
225 description = None
225
226
226 # message intended for humans explaining what an upgrade addressing this
227 # message intended for humans explaining what an upgrade addressing this
227 # issue will do. should be worded in the future tense.
228 # issue will do. should be worded in the future tense.
228 upgrademessage = None
229 upgrademessage = None
229
230
230 # value of current Mercurial default for new repository
231 # value of current Mercurial default for new repository
231 default = None
232 default = None
232
233
233 def __init__(self):
234 def __init__(self):
234 raise NotImplementedError()
235 raise NotImplementedError()
235
236
236 @staticmethod
237 @staticmethod
237 def fromrepo(repo):
238 def fromrepo(repo):
238 """current value of the variant in the repository"""
239 """current value of the variant in the repository"""
239 raise NotImplementedError()
240 raise NotImplementedError()
240
241
241 @staticmethod
242 @staticmethod
242 def fromconfig(repo):
243 def fromconfig(repo):
243 """current value of the variant in the configuration"""
244 """current value of the variant in the configuration"""
244 raise NotImplementedError()
245 raise NotImplementedError()
245
246
246
247
247 class requirementformatvariant(formatvariant):
248 class requirementformatvariant(formatvariant):
248 """formatvariant based on a 'requirement' name.
249 """formatvariant based on a 'requirement' name.
249
250
250 Many format variants are controlled by a 'requirement'. We define a small
251 Many format variants are controlled by a 'requirement'. We define a small
251 subclass to factor out the common code.
252 subclass to factor out the common code.
252 """
253 """
253
254
254 # the requirement that controls this format variant
255 # the requirement that controls this format variant
255 _requirement = None
256 _requirement = None
256
257
257 @staticmethod
258 @staticmethod
258 def _newreporequirements(ui):
259 def _newreporequirements(ui):
259 return localrepo.newreporequirements(
260 return localrepo.newreporequirements(
260 ui, localrepo.defaultcreateopts(ui)
261 ui, localrepo.defaultcreateopts(ui)
261 )
262 )
262
263
263 @classmethod
264 @classmethod
264 def fromrepo(cls, repo):
265 def fromrepo(cls, repo):
265 assert cls._requirement is not None
266 assert cls._requirement is not None
266 return cls._requirement in repo.requirements
267 return cls._requirement in repo.requirements
267
268
268 @classmethod
269 @classmethod
269 def fromconfig(cls, repo):
270 def fromconfig(cls, repo):
270 assert cls._requirement is not None
271 assert cls._requirement is not None
271 return cls._requirement in cls._newreporequirements(repo.ui)
272 return cls._requirement in cls._newreporequirements(repo.ui)
272
273
273
274
274 @registerformatvariant
275 @registerformatvariant
275 class fncache(requirementformatvariant):
276 class fncache(requirementformatvariant):
276 name = b'fncache'
277 name = b'fncache'
277
278
278 _requirement = b'fncache'
279 _requirement = b'fncache'
279
280
280 default = True
281 default = True
281
282
282 description = _(
283 description = _(
283 b'long and reserved filenames may not work correctly; '
284 b'long and reserved filenames may not work correctly; '
284 b'repository performance is sub-optimal'
285 b'repository performance is sub-optimal'
285 )
286 )
286
287
287 upgrademessage = _(
288 upgrademessage = _(
288 b'repository will be more resilient to storing '
289 b'repository will be more resilient to storing '
289 b'certain paths and performance of certain '
290 b'certain paths and performance of certain '
290 b'operations should be improved'
291 b'operations should be improved'
291 )
292 )
292
293
293
294
294 @registerformatvariant
295 @registerformatvariant
295 class dotencode(requirementformatvariant):
296 class dotencode(requirementformatvariant):
296 name = b'dotencode'
297 name = b'dotencode'
297
298
298 _requirement = b'dotencode'
299 _requirement = b'dotencode'
299
300
300 default = True
301 default = True
301
302
302 description = _(
303 description = _(
303 b'storage of filenames beginning with a period or '
304 b'storage of filenames beginning with a period or '
304 b'space may not work correctly'
305 b'space may not work correctly'
305 )
306 )
306
307
307 upgrademessage = _(
308 upgrademessage = _(
308 b'repository will be better able to store files '
309 b'repository will be better able to store files '
309 b'beginning with a space or period'
310 b'beginning with a space or period'
310 )
311 )
311
312
312
313
313 @registerformatvariant
314 @registerformatvariant
314 class generaldelta(requirementformatvariant):
315 class generaldelta(requirementformatvariant):
315 name = b'generaldelta'
316 name = b'generaldelta'
316
317
317 _requirement = b'generaldelta'
318 _requirement = b'generaldelta'
318
319
319 default = True
320 default = True
320
321
321 description = _(
322 description = _(
322 b'deltas within internal storage are unable to '
323 b'deltas within internal storage are unable to '
323 b'choose optimal revisions; repository is larger and '
324 b'choose optimal revisions; repository is larger and '
324 b'slower than it could be; interaction with other '
325 b'slower than it could be; interaction with other '
325 b'repositories may require extra network and CPU '
326 b'repositories may require extra network and CPU '
326 b'resources, making "hg push" and "hg pull" slower'
327 b'resources, making "hg push" and "hg pull" slower'
327 )
328 )
328
329
329 upgrademessage = _(
330 upgrademessage = _(
330 b'repository storage will be able to create '
331 b'repository storage will be able to create '
331 b'optimal deltas; new repository data will be '
332 b'optimal deltas; new repository data will be '
332 b'smaller and read times should decrease; '
333 b'smaller and read times should decrease; '
333 b'interacting with other repositories using this '
334 b'interacting with other repositories using this '
334 b'storage model should require less network and '
335 b'storage model should require less network and '
335 b'CPU resources, making "hg push" and "hg pull" '
336 b'CPU resources, making "hg push" and "hg pull" '
336 b'faster'
337 b'faster'
337 )
338 )
338
339
339
340
340 @registerformatvariant
341 @registerformatvariant
341 class sharedsafe(requirementformatvariant):
342 class sharedsafe(requirementformatvariant):
342 name = b'exp-sharesafe'
343 name = b'exp-sharesafe'
343 _requirement = requirements.SHARESAFE_REQUIREMENT
344 _requirement = requirements.SHARESAFE_REQUIREMENT
344
345
345 default = False
346 default = False
346
347
347 description = _(
348 description = _(
348 b'old shared repositories do not share source repository '
349 b'old shared repositories do not share source repository '
349 b'requirements and config. This leads to various problems '
350 b'requirements and config. This leads to various problems '
350 b'when the source repository format is upgraded or some new '
351 b'when the source repository format is upgraded or some new '
351 b'extensions are enabled.'
352 b'extensions are enabled.'
352 )
353 )
353
354
354 upgrademessage = _(
355 upgrademessage = _(
355 b'Upgrades a repository to share-safe format so that future '
356 b'Upgrades a repository to share-safe format so that future '
356 b'shares of this repository share its requirements and configs.'
357 b'shares of this repository share its requirements and configs.'
357 )
358 )
358
359
359
360
360 @registerformatvariant
361 @registerformatvariant
361 class sparserevlog(requirementformatvariant):
362 class sparserevlog(requirementformatvariant):
362 name = b'sparserevlog'
363 name = b'sparserevlog'
363
364
364 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
365 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
365
366
366 default = True
367 default = True
367
368
368 description = _(
369 description = _(
369 b'in order to limit disk reading and memory usage on older '
370 b'in order to limit disk reading and memory usage on older '
370 b'versions, the span of a delta chain from its root to its '
371 b'versions, the span of a delta chain from its root to its '
371 b'end is limited, regardless of the relevant data in this span. '
372 b'end is limited, regardless of the relevant data in this span. '
372 b'This can severely limit the ability of Mercurial to build '
373 b'This can severely limit the ability of Mercurial to build '
373 b'good delta chains, resulting in much more storage space '
374 b'good delta chains, resulting in much more storage space '
374 b'being taken and limited reusability of on-disk deltas '
375 b'being taken and limited reusability of on-disk deltas '
375 b'during exchange.'
376 b'during exchange.'
376 )
377 )
377
378
378 upgrademessage = _(
379 upgrademessage = _(
379 b'Revlog supports delta chains with more unused data '
380 b'Revlog supports delta chains with more unused data '
380 b'between payloads. These gaps will be skipped at read '
381 b'between payloads. These gaps will be skipped at read '
381 b'time. This allows for better delta chains, yielding '
382 b'time. This allows for better delta chains, yielding '
382 b'better compression and faster exchange with the server.'
383 b'better compression and faster exchange with the server.'
383 )
384 )
384
385
385
386
386 @registerformatvariant
387 @registerformatvariant
387 class sidedata(requirementformatvariant):
388 class sidedata(requirementformatvariant):
388 name = b'sidedata'
389 name = b'sidedata'
389
390
390 _requirement = requirements.SIDEDATA_REQUIREMENT
391 _requirement = requirements.SIDEDATA_REQUIREMENT
391
392
392 default = False
393 default = False
393
394
394 description = _(
395 description = _(
395 b'Allows storage of extra data alongside a revision, '
396 b'Allows storage of extra data alongside a revision, '
396 b'unlocking various caching options.'
397 b'unlocking various caching options.'
397 )
398 )
398
399
399 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
400 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
400
401
401
402
402 @registerformatvariant
403 @registerformatvariant
403 class persistentnodemap(requirementformatvariant):
404 class persistentnodemap(requirementformatvariant):
404 name = b'persistent-nodemap'
405 name = b'persistent-nodemap'
405
406
406 _requirement = requirements.NODEMAP_REQUIREMENT
407 _requirement = requirements.NODEMAP_REQUIREMENT
407
408
408 default = False
409 default = False
409
410
410 description = _(
411 description = _(
411 b'persist the node -> rev mapping on disk to speed up lookups'
412 b'persist the node -> rev mapping on disk to speed up lookups'
412 )
413 )
413
414
414 upgrademessage = _(b'Speed up revision lookups by node id.')
415 upgrademessage = _(b'Speed up revision lookups by node id.')
415
416
416
417
417 @registerformatvariant
418 @registerformatvariant
418 class copiessdc(requirementformatvariant):
419 class copiessdc(requirementformatvariant):
419 name = b'copies-sdc'
420 name = b'copies-sdc'
420
421
421 _requirement = requirements.COPIESSDC_REQUIREMENT
422 _requirement = requirements.COPIESSDC_REQUIREMENT
422
423
423 default = False
424 default = False
424
425
425 description = _(b'Stores copies information alongside changesets.')
426 description = _(b'Stores copies information alongside changesets.')
426
427
427 upgrademessage = _(
428 upgrademessage = _(
428 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
429 b'Allows using a more efficient algorithm to deal with ' b'copy tracing.'
429 )
430 )
430
431
431
432
432 @registerformatvariant
433 @registerformatvariant
433 class removecldeltachain(formatvariant):
434 class removecldeltachain(formatvariant):
434 name = b'plain-cl-delta'
435 name = b'plain-cl-delta'
435
436
436 default = True
437 default = True
437
438
438 description = _(
439 description = _(
439 b'changelog storage is using deltas instead of '
440 b'changelog storage is using deltas instead of '
440 b'raw entries; changelog reading and any '
441 b'raw entries; changelog reading and any '
441 b'operation relying on changelog data are slower '
442 b'operation relying on changelog data are slower '
442 b'than they could be'
443 b'than they could be'
443 )
444 )
444
445
445 upgrademessage = _(
446 upgrademessage = _(
446 b'changelog storage will be reformatted to '
447 b'changelog storage will be reformatted to '
447 b'store raw entries; changelog reading will be '
448 b'store raw entries; changelog reading will be '
448 b'faster; changelog size may be reduced'
449 b'faster; changelog size may be reduced'
449 )
450 )
450
451
451 @staticmethod
452 @staticmethod
452 def fromrepo(repo):
453 def fromrepo(repo):
453 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
454 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
454 # changelogs with deltas.
455 # changelogs with deltas.
455 cl = repo.changelog
456 cl = repo.changelog
456 chainbase = cl.chainbase
457 chainbase = cl.chainbase
457 return all(rev == chainbase(rev) for rev in cl)
458 return all(rev == chainbase(rev) for rev in cl)
458
459
459 @staticmethod
460 @staticmethod
460 def fromconfig(repo):
461 def fromconfig(repo):
461 return True
462 return True
462
463
463
464
464 @registerformatvariant
465 @registerformatvariant
465 class compressionengine(formatvariant):
466 class compressionengine(formatvariant):
466 name = b'compression'
467 name = b'compression'
467 default = b'zlib'
468 default = b'zlib'
468
469
469 description = _(
470 description = _(
470 b'Compression algorithm used to compress data. '
471 b'Compression algorithm used to compress data. '
471 b'Some engines are faster than others'
472 b'Some engines are faster than others'
472 )
473 )
473
474
474 upgrademessage = _(
475 upgrademessage = _(
475 b'revlog content will be recompressed with the new algorithm.'
476 b'revlog content will be recompressed with the new algorithm.'
476 )
477 )
477
478
478 @classmethod
479 @classmethod
479 def fromrepo(cls, repo):
480 def fromrepo(cls, repo):
480 # we allow multiple compression engine requirements to co-exist because
481 # we allow multiple compression engine requirements to co-exist because
481 # strictly speaking, revlog seems to support mixed compression styles.
482 # strictly speaking, revlog seems to support mixed compression styles.
482 #
483 #
483 # The compression used for new entries will be "the last one"
484 # The compression used for new entries will be "the last one"
484 compression = b'zlib'
485 compression = b'zlib'
485 for req in repo.requirements:
486 for req in repo.requirements:
486 prefix = req.startswith
487 prefix = req.startswith
487 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
488 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
488 compression = req.split(b'-', 2)[2]
489 compression = req.split(b'-', 2)[2]
489 return compression
490 return compression
490
491
491 @classmethod
492 @classmethod
492 def fromconfig(cls, repo):
493 def fromconfig(cls, repo):
493 compengines = repo.ui.configlist(b'format', b'revlog-compression')
494 compengines = repo.ui.configlist(b'format', b'revlog-compression')
494 # return the first valid value as the selection code would do
495 # return the first valid value as the selection code would do
495 for comp in compengines:
496 for comp in compengines:
496 if comp in util.compengines:
497 if comp in util.compengines:
497 return comp
498 return comp
498
499
499 # no valid compression found; let's display them all for clarity
500 # no valid compression found; let's display them all for clarity
500 return b','.join(compengines)
501 return b','.join(compengines)
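
For reference, the requirement-name parsing in fromrepo() keeps everything after the second dash; the values below are illustrative.

# b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
# b'exp-compression-foo'.split(b'-', 2)[2] == b'foo'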
501
502
502
503
503 @registerformatvariant
504 @registerformatvariant
504 class compressionlevel(formatvariant):
505 class compressionlevel(formatvariant):
505 name = b'compression-level'
506 name = b'compression-level'
506 default = b'default'
507 default = b'default'
507
508
508 description = _(b'compression level')
509 description = _(b'compression level')
509
510
510 upgrademessage = _(b'revlog content will be recompressed')
511 upgrademessage = _(b'revlog content will be recompressed')
511
512
512 @classmethod
513 @classmethod
513 def fromrepo(cls, repo):
514 def fromrepo(cls, repo):
514 comp = compressionengine.fromrepo(repo)
515 comp = compressionengine.fromrepo(repo)
515 level = None
516 level = None
516 if comp == b'zlib':
517 if comp == b'zlib':
517 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
518 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
518 elif comp == b'zstd':
519 elif comp == b'zstd':
519 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
520 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
520 if level is None:
521 if level is None:
521 return b'default'
522 return b'default'
522 return bytes(level)
523 return bytes(level)
523
524
524 @classmethod
525 @classmethod
525 def fromconfig(cls, repo):
526 def fromconfig(cls, repo):
526 comp = compressionengine.fromconfig(repo)
527 comp = compressionengine.fromconfig(repo)
527 level = None
528 level = None
528 if comp == b'zlib':
529 if comp == b'zlib':
529 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
530 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
530 elif comp == b'zstd':
531 elif comp == b'zstd':
531 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
532 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
532 if level is None:
533 if level is None:
533 return b'default'
534 return b'default'
534 return bytes(level)
535 return bytes(level)
535
536
536
537
537 def finddeficiencies(repo):
538 def finddeficiencies(repo):
538 """returns a list of deficiencies that the repo suffer from"""
539 """returns a list of deficiencies that the repo suffer from"""
539 deficiencies = []
540 deficiencies = []
540
541
541 # We could detect lack of revlogv1 and store here, but they were added
542 # We could detect lack of revlogv1 and store here, but they were added
542 # in 0.9.2 and we don't support upgrading repos without these
543 # in 0.9.2 and we don't support upgrading repos without these
543 # requirements, so let's not bother.
544 # requirements, so let's not bother.
544
545
545 for fv in allformatvariant:
546 for fv in allformatvariant:
546 if not fv.fromrepo(repo):
547 if not fv.fromrepo(repo):
547 deficiencies.append(fv)
548 deficiencies.append(fv)
548
549
549 return deficiencies
550 return deficiencies
550
551
551
552
552 # search without '-' to support older forms on newer clients.
553 # search without '-' to support older forms on newer clients.
553 #
554 #
554 # We don't enforce backward compatibility for debug commands so this
555 # We don't enforce backward compatibility for debug commands so this
555 # might eventually be dropped. However, having to use two different
556 # might eventually be dropped. However, having to use two different
556 # forms in scripts when comparing results is annoying enough to add
557 # forms in scripts when comparing results is annoying enough to add
557 # backward compatibility for a while.
558 # backward compatibility for a while.
558 legacy_opts_map = {
559 legacy_opts_map = {
559 b'redeltaparent': b're-delta-parent',
560 b'redeltaparent': b're-delta-parent',
560 b'redeltamultibase': b're-delta-multibase',
561 b'redeltamultibase': b're-delta-multibase',
561 b'redeltaall': b're-delta-all',
562 b'redeltaall': b're-delta-all',
562 b'redeltafulladd': b're-delta-fulladd',
563 b'redeltafulladd': b're-delta-fulladd',
563 }
564 }
564
565
565 ALL_OPTIMISATIONS = []
566 ALL_OPTIMISATIONS = []
566
567
567
568
568 def register_optimization(obj):
569 def register_optimization(obj):
569 ALL_OPTIMISATIONS.append(obj)
570 ALL_OPTIMISATIONS.append(obj)
570 return obj
571 return obj
571
572
572
573
573 register_optimization(
574 register_optimization(
574 improvement(
575 improvement(
575 name=b're-delta-parent',
576 name=b're-delta-parent',
576 type=OPTIMISATION,
577 type=OPTIMISATION,
577 description=_(
578 description=_(
578 b'deltas within internal storage will be recalculated to '
579 b'deltas within internal storage will be recalculated to '
579 b'choose an optimal base revision where this was not '
580 b'choose an optimal base revision where this was not '
580 b'already done; the size of the repository may shrink and '
581 b'already done; the size of the repository may shrink and '
581 b'various operations may become faster; the first time '
582 b'various operations may become faster; the first time '
582 b'this optimization is performed could slow down upgrade '
583 b'this optimization is performed could slow down upgrade '
583 b'execution considerably; subsequent invocations should '
584 b'execution considerably; subsequent invocations should '
584 b'not run noticeably slower'
585 b'not run noticeably slower'
585 ),
586 ),
586 upgrademessage=_(
587 upgrademessage=_(
587 b'deltas within internal storage will choose a new '
588 b'deltas within internal storage will choose a new '
588 b'base revision if needed'
589 b'base revision if needed'
589 ),
590 ),
590 )
591 )
591 )
592 )
592
593
593 register_optimization(
594 register_optimization(
594 improvement(
595 improvement(
595 name=b're-delta-multibase',
596 name=b're-delta-multibase',
596 type=OPTIMISATION,
597 type=OPTIMISATION,
597 description=_(
598 description=_(
598 b'deltas within internal storage will be recalculated '
599 b'deltas within internal storage will be recalculated '
599 b'against multiple base revisions and the smallest '
600 b'against multiple base revisions and the smallest '
600 b'difference will be used; the size of the repository may '
601 b'difference will be used; the size of the repository may '
601 b'shrink significantly when there are many merges; this '
602 b'shrink significantly when there are many merges; this '
602 b'optimization will slow down execution in proportion to '
603 b'optimization will slow down execution in proportion to '
603 b'the number of merges in the repository and the number '
604 b'the number of merges in the repository and the number '
604 b'of files in the repository; this slowdown should not '
605 b'of files in the repository; this slowdown should not '
605 b'be significant unless there are tens of thousands of '
606 b'be significant unless there are tens of thousands of '
606 b'files and thousands of merges'
607 b'files and thousands of merges'
607 ),
608 ),
608 upgrademessage=_(
609 upgrademessage=_(
609 b'deltas within internal storage will choose an '
610 b'deltas within internal storage will choose an '
610 b'optimal delta by computing deltas against multiple '
611 b'optimal delta by computing deltas against multiple '
611 b'parents; may slow down execution time '
612 b'parents; may slow down execution time '
612 b'significantly'
613 b'significantly'
613 ),
614 ),
614 )
615 )
615 )
616 )
616
617
617 register_optimization(
618 register_optimization(
618 improvement(
619 improvement(
619 name=b're-delta-all',
620 name=b're-delta-all',
620 type=OPTIMISATION,
621 type=OPTIMISATION,
621 description=_(
622 description=_(
622 b'deltas within internal storage will always be '
623 b'deltas within internal storage will always be '
623 b'recalculated without reusing prior deltas; this will '
624 b'recalculated without reusing prior deltas; this will '
624 b'likely make execution run several times slower; this '
625 b'likely make execution run several times slower; this '
625 b'optimization is typically not needed'
626 b'optimization is typically not needed'
626 ),
627 ),
627 upgrademessage=_(
628 upgrademessage=_(
628 b'deltas within internal storage will be fully '
629 b'deltas within internal storage will be fully '
629 b'recomputed; this will likely drastically slow down '
630 b'recomputed; this will likely drastically slow down '
630 b'execution time'
631 b'execution time'
631 ),
632 ),
632 )
633 )
633 )
634 )
634
635
635 register_optimization(
636 register_optimization(
636 improvement(
637 improvement(
637 name=b're-delta-fulladd',
638 name=b're-delta-fulladd',
638 type=OPTIMISATION,
639 type=OPTIMISATION,
639 description=_(
640 description=_(
640 b'every revision will be re-added as if it was new '
641 b'every revision will be re-added as if it was new '
641 b'content. It will go through the full storage '
642 b'content. It will go through the full storage '
642 b'mechanism giving extensions a chance to process it '
643 b'mechanism giving extensions a chance to process it '
643 b'(eg. lfs). This is similar to "re-delta-all" but even '
644 b'(eg. lfs). This is similar to "re-delta-all" but even '
644 b'slower since more logic is involved.'
645 b'slower since more logic is involved.'
645 ),
646 ),
646 upgrademessage=_(
647 upgrademessage=_(
647 b'each revision will be added as new content to the '
648 b'each revision will be added as new content to the '
648 b'internal storage; this will likely drastically slow '
649 b'internal storage; this will likely drastically slow '
649 b'down execution time, but some extensions might need '
650 b'down execution time, but some extensions might need '
650 b'it'
651 b'it'
651 ),
652 ),
652 )
653 )
653 )
654 )
654
655
655
656
656 def findoptimizations(repo):
657 def findoptimizations(repo):
657 """Determine optimisation that could be used during upgrade"""
658 """Determine optimisation that could be used during upgrade"""
658 # These are unconditionally added. There is logic later that figures out
659 # These are unconditionally added. There is logic later that figures out
659 # which ones to apply.
660 # which ones to apply.
660 return list(ALL_OPTIMISATIONS)
661 return list(ALL_OPTIMISATIONS)
661
662
662
663
663 def determineactions(repo, deficiencies, sourcereqs, destreqs):
664 def determineactions(repo, deficiencies, sourcereqs, destreqs):
664 """Determine upgrade actions that will be performed.
665 """Determine upgrade actions that will be performed.
665
666
666 Given a list of improvements as returned by ``finddeficiencies`` and
667 Given a list of improvements as returned by ``finddeficiencies`` and
667 ``findoptimizations``, determine the list of upgrade actions that
668 ``findoptimizations``, determine the list of upgrade actions that
668 will be performed.
669 will be performed.
669
670
670 The role of this function is to filter improvements if needed, apply
671 The role of this function is to filter improvements if needed, apply
671 recommended optimizations from the improvements list that make sense,
672 recommended optimizations from the improvements list that make sense,
672 etc.
673 etc.
673
674
674 Returns a list of improvement instances (the actions to perform).
675 Returns a list of improvement instances (the actions to perform).
675 """
676 """
676 newactions = []
677 newactions = []
677
678
678 for d in deficiencies:
679 for d in deficiencies:
679 name = d._requirement
680 name = d._requirement
680
681
681 # If the action is a requirement that doesn't show up in the
682 # If the action is a requirement that doesn't show up in the
682 # destination requirements, prune the action.
683 # destination requirements, prune the action.
683 if name is not None and name not in destreqs:
684 if name is not None and name not in destreqs:
684 continue
685 continue
685
686
686 newactions.append(d)
687 newactions.append(d)
687
688
688 # FUTURE consider adding some optimizations here for certain transitions.
689 # FUTURE consider adding some optimizations here for certain transitions.
689 # e.g. adding generaldelta could schedule parent redeltas.
690 # e.g. adding generaldelta could schedule parent redeltas.
690
691
691 return newactions
692 return newactions
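
A hedged sketch of how these helpers compose during an upgrade run; destreqs mirrors how _newreporequirements() above computes requirements for a fresh repository.

deficiencies = finddeficiencies(repo)
destreqs = localrepo.newreporequirements(
    repo.ui, localrepo.defaultcreateopts(repo.ui)
)
actions = determineactions(
    repo, deficiencies, repo.requirements, destreqs
)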
692
693
693
694
694 def _revlogfrompath(repo, path):
695 def _revlogfrompath(repo, path):
695 """Obtain a revlog from a repo path.
696 """Obtain a revlog from a repo path.
696
697
697 An instance of the appropriate class is returned.
698 An instance of the appropriate class is returned.
698 """
699 """
699 if path == b'00changelog.i':
700 if path == b'00changelog.i':
700 return changelog.changelog(repo.svfs)
701 return changelog.changelog(repo.svfs)
701 elif path.endswith(b'00manifest.i'):
702 elif path.endswith(b'00manifest.i'):
702 mandir = path[: -len(b'00manifest.i')]
703 mandir = path[: -len(b'00manifest.i')]
703 return manifest.manifestrevlog(repo.svfs, tree=mandir)
704 return manifest.manifestrevlog(repo.svfs, tree=mandir)
704 else:
705 else:
705 # reverse of "/".join(("data", path + ".i"))
706 # reverse of "/".join(("data", path + ".i"))
706 return filelog.filelog(repo.svfs, path[5:-2])
707 return filelog.filelog(repo.svfs, path[5:-2])
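
The slicing in the filelog branch strips the b'data/' prefix and the b'.i' suffix; for example (illustrative path):

# b'data/foo.txt.i'[5:-2] == b'foo.txt'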
707
708
708
709
709 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
710 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
710 """copy all relevant files for `oldrl` into `destrepo` store
711 """copy all relevant files for `oldrl` into `destrepo` store
711
712
712 Files are copied "as is" without any transformation. The copy is performed
713 Files are copied "as is" without any transformation. The copy is performed
713 without extra checks. Callers are responsible for making sure the copied
714 without extra checks. Callers are responsible for making sure the copied
714 content is compatible with the format of the destination repository.
715 content is compatible with the format of the destination repository.
715 """
716 """
716 oldrl = getattr(oldrl, '_revlog', oldrl)
717 oldrl = getattr(oldrl, '_revlog', oldrl)
717 newrl = _revlogfrompath(destrepo, unencodedname)
718 newrl = _revlogfrompath(destrepo, unencodedname)
718 newrl = getattr(newrl, '_revlog', newrl)
719 newrl = getattr(newrl, '_revlog', newrl)
719
720
720 oldvfs = oldrl.opener
721 oldvfs = oldrl.opener
721 newvfs = newrl.opener
722 newvfs = newrl.opener
722 oldindex = oldvfs.join(oldrl.indexfile)
723 oldindex = oldvfs.join(oldrl.indexfile)
723 newindex = newvfs.join(newrl.indexfile)
724 newindex = newvfs.join(newrl.indexfile)
724 olddata = oldvfs.join(oldrl.datafile)
725 olddata = oldvfs.join(oldrl.datafile)
725 newdata = newvfs.join(newrl.datafile)
726 newdata = newvfs.join(newrl.datafile)
726
727
727 with newvfs(newrl.indexfile, b'w'):
728 with newvfs(newrl.indexfile, b'w'):
728 pass # create all the directories
729 pass # create all the directories
729
730
730 util.copyfile(oldindex, newindex)
731 util.copyfile(oldindex, newindex)
731 copydata = oldrl.opener.exists(oldrl.datafile)
732 copydata = oldrl.opener.exists(oldrl.datafile)
732 if copydata:
733 if copydata:
733 util.copyfile(olddata, newdata)
734 util.copyfile(olddata, newdata)
734
735
735 if not (
736 if not (
736 unencodedname.endswith(b'00changelog.i')
737 unencodedname.endswith(b'00changelog.i')
737 or unencodedname.endswith(b'00manifest.i')
738 or unencodedname.endswith(b'00manifest.i')
738 ):
739 ):
739 destrepo.svfs.fncache.add(unencodedname)
740 destrepo.svfs.fncache.add(unencodedname)
740 if copydata:
741 if copydata:
741 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
742 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
742
743
743
744
744 UPGRADE_CHANGELOG = object()
745 UPGRADE_CHANGELOG = object()
745 UPGRADE_MANIFEST = object()
746 UPGRADE_MANIFEST = object()
746 UPGRADE_FILELOGS = object()
747 UPGRADE_FILELOGS = object()
747
748
748 UPGRADE_ALL_REVLOGS = frozenset(
749 UPGRADE_ALL_REVLOGS = frozenset(
749 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
750 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOGS]
750 )
751 )
751
752
752
753
753 def getsidedatacompanion(srcrepo, dstrepo):
754 def getsidedatacompanion(srcrepo, dstrepo):
754 sidedatacompanion = None
755 sidedatacompanion = None
755 removedreqs = srcrepo.requirements - dstrepo.requirements
756 removedreqs = srcrepo.requirements - dstrepo.requirements
756 addedreqs = dstrepo.requirements - srcrepo.requirements
757 addedreqs = dstrepo.requirements - srcrepo.requirements
757 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
758 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
758
759
759 def sidedatacompanion(rl, rev):
760 def sidedatacompanion(rl, rev):
760 rl = getattr(rl, '_revlog', rl)
761 rl = getattr(rl, '_revlog', rl)
761 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
762 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
762 return True, (), {}, 0, 0
763 return True, (), {}, 0, 0
763 return False, (), {}, 0, 0
764 return False, (), {}, 0, 0
764
765
765 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
766 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
766 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
767 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
767 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
768 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
768 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
769 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
769 return sidedatacompanion
770 return sidedatacompanion
770
771
771
772
772 def matchrevlog(revlogfilter, entry):
773 def matchrevlog(revlogfilter, entry):
773 """check if a revlog is selected for cloning.
774 """check if a revlog is selected for cloning.
774
775
775 In other words, does the revlog need any updates, or can it be
776 In other words, does the revlog need any updates, or can it be
776 blindly copied?
777 blindly copied?
777
778
778 The store entry is checked against the passed filter"""
779 The store entry is checked against the passed filter"""
779 if entry.endswith(b'00changelog.i'):
780 if entry.endswith(b'00changelog.i'):
780 return UPGRADE_CHANGELOG in revlogfilter
781 return UPGRADE_CHANGELOG in revlogfilter
781 elif entry.endswith(b'00manifest.i'):
782 elif entry.endswith(b'00manifest.i'):
782 return UPGRADE_MANIFEST in revlogfilter
783 return UPGRADE_MANIFEST in revlogfilter
783 return UPGRADE_FILELOGS in revlogfilter
784 return UPGRADE_FILELOGS in revlogfilter
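
A usage sketch with illustrative store entries:

revlogfilter = {UPGRADE_CHANGELOG, UPGRADE_MANIFEST}
matchrevlog(revlogfilter, b'00changelog.i')   # True
matchrevlog(revlogfilter, b'data/foo.txt.i')  # False: filelogs not selected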
784
785
785
786
def _clonerevlogs(
    ui,
    srcrepo,
    dstrepo,
    tr,
    deltareuse,
    forcedeltabothparents,
    revlogs=UPGRADE_ALL_REVLOGS,
):
    """Copy revlogs between 2 repos."""
    revcount = 0
    srcsize = 0
    srcrawsize = 0
    dstsize = 0
    fcount = 0
    frevcount = 0
    fsrcsize = 0
    frawsize = 0
    fdstsize = 0
    mcount = 0
    mrevcount = 0
    msrcsize = 0
    mrawsize = 0
    mdstsize = 0
    crevcount = 0
    csrcsize = 0
    crawsize = 0
    cdstsize = 0

    alldatafiles = list(srcrepo.store.walk())

    # Perform a pass to collect metadata. This validates we can open all
    # source files and allows a unified progress bar to be displayed.
    for unencoded, encoded, size in alldatafiles:
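        # A revlog's data file (.d) is reached through its index (.i),
        # so skip it here to avoid processing the same revlog twice.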
        if unencoded.endswith(b'.d'):
            continue

        rl = _revlogfrompath(srcrepo, unencoded)

        info = rl.storageinfo(
            exclusivefiles=True,
            revisionscount=True,
            trackedsize=True,
            storedsize=True,
        )

        revcount += info[b'revisionscount'] or 0
        datasize = info[b'storedsize'] or 0
        rawsize = info[b'trackedsize'] or 0

        srcsize += datasize
        srcrawsize += rawsize

        # This is for the separate progress bars.
        if isinstance(rl, changelog.changelog):
            crevcount += len(rl)
            csrcsize += datasize
            crawsize += rawsize
        elif isinstance(rl, manifest.manifestrevlog):
            mcount += 1
            mrevcount += len(rl)
            msrcsize += datasize
            mrawsize += rawsize
        elif isinstance(rl, filelog.filelog):
            fcount += 1
            frevcount += len(rl)
            fsrcsize += datasize
            frawsize += rawsize
        else:
            raise error.ProgrammingError(b'unknown revlog type')

    if not revcount:
        return

    ui.status(
        _(
            b'migrating %d total revisions (%d in filelogs, %d in manifests, '
            b'%d in changelog)\n'
        )
        % (revcount, frevcount, mrevcount, crevcount)
    )
    ui.status(
        _(b'migrating %s in store; %s tracked data\n')
        % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
    )

    # Used to keep track of progress.
    progress = None

    def oncopiedrevision(rl, rev, node):
        progress.increment()

    sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)

    # Do the actual copying.
    # FUTURE this operation can be farmed off to worker processes.
    seen = set()
    for unencoded, encoded, size in alldatafiles:
        if unencoded.endswith(b'.d'):
            continue

        oldrl = _revlogfrompath(srcrepo, unencoded)

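        # The transitions below assume store.walk() yields filelogs
        # first, then manifests, then the changelog; the 'seen' markers
        # let us print a summary for the previous group and start a new
        # progress bar each time we cross a boundary.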
        if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
            ui.status(
                _(
                    b'finished migrating %d manifest revisions across %d '
                    b'manifests; change in size: %s\n'
                )
                % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
            )

            ui.status(
                _(
                    b'migrating changelog containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    crevcount,
                    util.bytecount(csrcsize),
                    util.bytecount(crawsize),
                )
            )
            seen.add(b'c')
            progress = srcrepo.ui.makeprogress(
                _(b'changelog revisions'), total=crevcount
            )
        elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
            ui.status(
                _(
                    b'finished migrating %d filelog revisions across %d '
                    b'filelogs; change in size: %s\n'
                )
                % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
            )

            ui.status(
                _(
                    b'migrating %d manifests containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    mcount,
                    mrevcount,
                    util.bytecount(msrcsize),
                    util.bytecount(mrawsize),
                )
            )
            seen.add(b'm')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'manifest revisions'), total=mrevcount
            )
        elif b'f' not in seen:
            ui.status(
                _(
                    b'migrating %d filelogs containing %d revisions '
                    b'(%s in store; %s tracked data)\n'
                )
                % (
                    fcount,
                    frevcount,
                    util.bytecount(fsrcsize),
                    util.bytecount(frawsize),
                )
            )
            seen.add(b'f')
            if progress:
                progress.complete()
            progress = srcrepo.ui.makeprogress(
                _(b'file revisions'), total=frevcount
            )

        if matchrevlog(revlogs, unencoded):
            ui.note(
                _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
            )
            newrl = _revlogfrompath(dstrepo, unencoded)
            oldrl.clone(
                tr,
                newrl,
                addrevisioncb=oncopiedrevision,
                deltareuse=deltareuse,
                forcedeltabothparents=forcedeltabothparents,
                sidedatacompanion=sidedatacompanion,
            )
        else:
            msg = _(b'blindly copying %s containing %i revisions\n')
            ui.note(msg % (unencoded, len(oldrl)))
            _copyrevlog(tr, dstrepo, oldrl, unencoded)

            newrl = _revlogfrompath(dstrepo, unencoded)

        info = newrl.storageinfo(storedsize=True)
        datasize = info[b'storedsize'] or 0

        dstsize += datasize

        if isinstance(newrl, changelog.changelog):
            cdstsize += datasize
        elif isinstance(newrl, manifest.manifestrevlog):
            mdstsize += datasize
        else:
            fdstsize += datasize

    progress.complete()

    ui.status(
        _(
            b'finished migrating %d changelog revisions; change in size: '
            b'%s\n'
        )
        % (crevcount, util.bytecount(cdstsize - csrcsize))
    )

    ui.status(
        _(
            b'finished migrating %d total revisions; total change in store '
            b'size: %s\n'
        )
        % (revcount, util.bytecount(dstsize - srcsize))
    )


def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
    """Determine whether to copy a store file during upgrade.

    This function is called when migrating store files from ``srcrepo`` to
    ``dstrepo`` as part of upgrading a repository.

    Args:
      srcrepo: repo we are copying from
      dstrepo: repo we are copying to
      requirements: set of requirements for ``dstrepo``
      path: store file being examined
      mode: the ``ST_MODE`` file type of ``path``
      st: ``stat`` data structure for ``path``

    Function should return ``True`` if the file is to be copied.
    """
    # Skip revlogs.
    if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
        return False
    # Skip transaction related files.
    if path.startswith(b'undo'):
        return False
    # Only copy regular files.
    if mode != stat.S_IFREG:
        return False
    # Skip other skipped files.
    if path in (b'lock', b'fncache'):
        return False

    return True


def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
    """Hook point for extensions to perform additional actions during upgrade.

    This function is called after revlogs and store files have been copied but
    before the new store is swapped into the original location.
    """


def _upgraderepo(
    ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
):
    """Do the low-level work of upgrading a repository.

    The upgrade is effectively performed as a copy between a source
    repository and a temporary destination repository.

    The source repository is unmodified for as long as possible so the
    upgrade can abort at any time without causing loss of service for
    readers and without corrupting the source repository.
    """
    assert srcrepo.currentwlock()
    assert dstrepo.currentwlock()

    ui.status(
        _(
            b'(it is safe to interrupt this process any time before '
            b'data migration completes)\n'
        )
    )

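    # Map the requested optimizations to revlog.clone() delta reuse
    # policies. Note that re-delta-multibase maps to the same policy as
    # re-delta-parent; its extra behaviour comes from the
    # forcedeltabothparents flag passed to _clonerevlogs() below.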
    if b're-delta-all' in actions:
        deltareuse = revlog.revlog.DELTAREUSENEVER
    elif b're-delta-parent' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-multibase' in actions:
        deltareuse = revlog.revlog.DELTAREUSESAMEREVS
    elif b're-delta-fulladd' in actions:
        deltareuse = revlog.revlog.DELTAREUSEFULLADD
    else:
        deltareuse = revlog.revlog.DELTAREUSEALWAYS

    with dstrepo.transaction(b'upgrade') as tr:
        _clonerevlogs(
            ui,
            srcrepo,
            dstrepo,
            tr,
            deltareuse,
            b're-delta-multibase' in actions,
            revlogs=revlogs,
        )

        # Now copy other files in the store directory.
        # The sorted() makes execution deterministic.
        for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
            if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
                continue

            srcrepo.ui.status(_(b'copying %s\n') % p)
            src = srcrepo.store.rawvfs.join(p)
            dst = dstrepo.store.rawvfs.join(p)
            util.copyfile(src, dst, copystat=True)

        _finishdatamigration(ui, srcrepo, dstrepo, requirements)

    ui.status(_(b'data fully migrated to temporary repository\n'))

    backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
    backupvfs = vfsmod.vfs(backuppath)

    # Make a backup of requires file first, as it is the first to be modified.
    util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))

    # We install an arbitrary requirement that clients must not support
    # as a mechanism to lock out new clients during the data swap. This is
    # better than allowing a client to continue while the repository is in
    # an inconsistent state.
    ui.status(
        _(
            b'marking source repository as being upgraded; clients will be '
            b'unable to read from repository\n'
        )
    )
    scmutil.writereporequirements(
        srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
    )

    ui.status(_(b'starting in-place swap of repository data\n'))
    ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)

    # Now swap in the new store directory. Doing it as a rename should make
    # the operation nearly instantaneous and atomic (at least in well-behaved
    # environments).
    ui.status(_(b'replacing store...\n'))
    tstart = util.timer()
    util.rename(srcrepo.spath, backupvfs.join(b'store'))
    util.rename(dstrepo.spath, srcrepo.spath)
    elapsed = util.timer() - tstart
    ui.status(
        _(
            b'store replacement complete; repository was inconsistent for '
            b'%0.1fs\n'
        )
        % elapsed
    )

    # We first write the requirements file. Any new requirements will lock
    # out legacy clients.
    ui.status(
        _(
            b'finalizing requirements file and making repository readable '
            b'again\n'
        )
    )
    scmutil.writereporequirements(srcrepo, requirements)

    # The lock file from the old store won't be removed because nothing has a
    # reference to its new location. So clean it up manually. Alternatively, we
    # could update srcrepo.svfs and other variables to point to the new
    # location. This is simpler.
    backupvfs.unlink(b'store/lock')

    return backuppath


def upgraderepo(
    ui,
    repo,
    run=False,
    optimize=None,
    backup=True,
    manifest=None,
    changelog=None,
    filelogs=None,
):
    """Upgrade a repository in place."""
    if optimize is None:
        optimize = []
    optimize = {legacy_opts_map.get(o, o) for o in optimize}
    repo = repo.unfiltered()

    revlogs = set(UPGRADE_ALL_REVLOGS)
    specentries = (
        (UPGRADE_CHANGELOG, changelog),
        (UPGRADE_MANIFEST, manifest),
        (UPGRADE_FILELOGS, filelogs),
    )
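    # Selection semantics, as implemented below: if any revlog kind is
    # explicitly enabled, reclone only the enabled kinds; if kinds are
    # only explicitly disabled, reclone everything except those.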
    specified = [(y, x) for (y, x) in specentries if x is not None]
    if specified:
        # the user restricted which revlogs should be recloned
        if any(x for y, x in specified):
            revlogs = set()
            for upgrade, enabled in specified:
                if enabled:
                    revlogs.add(upgrade)
        else:
            # none are enabled
            for upgrade, __ in specified:
                revlogs.discard(upgrade)

    # Ensure the repository can be upgraded.
    missingreqs = requiredsourcerequirements(repo) - repo.requirements
    if missingreqs:
        raise error.Abort(
            _(b'cannot upgrade repository; requirement missing: %s')
            % _(b', ').join(sorted(missingreqs))
        )

    blockedreqs = blocksourcerequirements(repo) & repo.requirements
    if blockedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; unsupported source '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(blockedreqs))
        )

    # FUTURE there is potentially a need to control the wanted requirements via
    # command arguments or via an extension hook point.
    newreqs = localrepo.newreporequirements(
        repo.ui, localrepo.defaultcreateopts(repo.ui)
    )
    newreqs.update(preservedrequirements(repo))

    noremovereqs = (
        repo.requirements - newreqs - supportremovedrequirements(repo)
    )
    if noremovereqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; requirement would be '
                b'removed: %s'
            )
            % _(b', ').join(sorted(noremovereqs))
        )

    noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
    if noaddreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support adding '
                b'requirement: %s'
            )
            % _(b', ').join(sorted(noaddreqs))
        )

    unsupportedreqs = newreqs - supporteddestrequirements(repo)
    if unsupportedreqs:
        raise error.Abort(
            _(
                b'cannot upgrade repository; do not support '
                b'destination requirement: %s'
            )
            % _(b', ').join(sorted(unsupportedreqs))
        )

    # Find and validate all improvements that can be made.
    alloptimizations = findoptimizations(repo)

    # Apply and Validate arguments.
    optimizations = []
    for o in alloptimizations:
        if o.name in optimize:
            optimizations.append(o)
            optimize.discard(o.name)

    if optimize:  # anything left is unknown
        raise error.Abort(
            _(b'unknown optimization action requested: %s')
            % b', '.join(sorted(optimize)),
            hint=_(b'run without arguments to see valid optimizations'),
        )

    deficiencies = finddeficiencies(repo)
    actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
    actions.extend(
        o
        for o in sorted(optimizations)
        # determineactions could have added optimisation
        if o not in actions
    )

    removedreqs = repo.requirements - newreqs
    addedreqs = newreqs - repo.requirements

    if revlogs != UPGRADE_ALL_REVLOGS:
        incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
        if incompatible:
            msg = _(
                b'ignoring revlogs selection flags, format requirements '
                b'change: %s\n'
            )
            ui.warn(msg % b', '.join(sorted(incompatible)))
            revlogs = UPGRADE_ALL_REVLOGS

    def write_labeled(l, label):
        first = True
        for r in sorted(l):
            if not first:
                ui.write(b', ')
            ui.write(r, label=label)
            first = False

    def printrequirements():
        ui.write(_(b'requirements\n'))
        ui.write(_(b'   preserved: '))
        write_labeled(
            newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
        )
        ui.write((b'\n'))
        removed = repo.requirements - newreqs
        if repo.requirements - newreqs:
            ui.write(_(b'   removed: '))
            write_labeled(removed, "upgrade-repo.requirement.removed")
            ui.write((b'\n'))
        added = newreqs - repo.requirements
        if added:
            ui.write(_(b'   added: '))
            write_labeled(added, "upgrade-repo.requirement.added")
            ui.write((b'\n'))
        ui.write(b'\n')

    def printoptimisations():
        optimisations = [a for a in actions if a.type == OPTIMISATION]
        optimisations.sort(key=lambda a: a.name)
        if optimisations:
            ui.write(_(b'optimisations: '))
            write_labeled(
                [a.name for a in optimisations],
                "upgrade-repo.optimisation.performed",
            )
            ui.write(b'\n\n')

    def printupgradeactions():
        for a in actions:
            ui.status(b'%s\n   %s\n\n' % (a.name, a.upgrademessage))

    if not run:
        fromconfig = []
        onlydefault = []

        for d in deficiencies:
            if d.fromconfig(repo):
                fromconfig.append(d)
            elif d.default:
                onlydefault.append(d)

        if fromconfig or onlydefault:

            if fromconfig:
                ui.status(
                    _(
                        b'repository lacks features recommended by '
                        b'current config options:\n\n'
                    )
                )
                for i in fromconfig:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            if onlydefault:
                ui.status(
                    _(
                        b'repository lacks features used by the default '
                        b'config options:\n\n'
                    )
                )
                for i in onlydefault:
                    ui.status(b'%s\n   %s\n\n' % (i.name, i.description))

            ui.status(b'\n')
        else:
            ui.status(
                _(
                    b'(no feature deficiencies found in existing '
                    b'repository)\n'
                )
            )

        ui.status(
            _(
                b'performing an upgrade with "--run" will make the following '
                b'changes:\n\n'
            )
        )

        printrequirements()
        printoptimisations()
        printupgradeactions()

        unusedoptimize = [i for i in alloptimizations if i not in actions]

        if unusedoptimize:
            ui.status(
                _(
                    b'additional optimizations are available by specifying '
                    b'"--optimize <name>":\n\n'
                )
            )
            for i in unusedoptimize:
                ui.status(_(b'%s\n   %s\n\n') % (i.name, i.description))
        return

    # Else we're in the run=true case.
    ui.write(_(b'upgrade will perform the following actions:\n\n'))
    printrequirements()
    printoptimisations()
    printupgradeactions()

    upgradeactions = [a.name for a in actions]

    ui.status(_(b'beginning upgrade...\n'))
    with repo.wlock(), repo.lock():
        ui.status(_(b'repository locked and read-only\n'))
        # Our strategy for upgrading the repository is to create a new,
        # temporary repository, write data to it, then do a swap of the
        # data. There are less heavyweight ways to do this, but it is easier
        # to create a new repo object than to instantiate all the components
        # (like the store) separately.
        tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
        backuppath = None
        try:
            ui.status(
                _(
                    b'creating temporary repository to stage migrated '
                    b'data: %s\n'
                )
                % tmppath
            )

            # clone ui without using ui.copy because repo.ui is protected
            repoui = repo.ui.__class__(repo.ui)
            dstrepo = hg.repository(repoui, path=tmppath, create=True)

            with dstrepo.wlock(), dstrepo.lock():
                backuppath = _upgraderepo(
                    ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
                )
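                # With backup=False (--no-backup), delete the backed-up
                # old repository content right away instead of keeping it.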
                if not (backup or backuppath is None):
                    ui.status(
                        _(b'removing old repository content%s\n') % backuppath
                    )
                    repo.vfs.rmtree(backuppath, forcibly=True)
                    backuppath = None

        finally:
            ui.status(_(b'removing temporary repository %s\n') % tmppath)
            repo.vfs.rmtree(tmppath, forcibly=True)

        if backuppath and not ui.quiet:
            ui.warn(
                _(b'copy of old repository backed up at %s\n') % backuppath
            )
            ui.warn(
                _(
                    b'the old repository will not be deleted; remove '
                    b'it to free up disk space once the upgraded '
                    b'repository is verified\n'
                )
            )

        if sharedsafe.name in addedreqs:
            ui.warn(
                _(
                    b'repository upgraded to share safe mode, existing'
                    b' shares will still work in old non-safe mode. '
                    b'Re-share existing shares to use them in safe mode.'
                    b' New shares will be created in safe mode.\n'
                )
            )
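        # The downgrade counterpart of the warning above: once the share
        # safe requirement is dropped from the source, shares created in
        # safe mode can no longer read it (see the test below, where the
        # safe share aborts) and must be recreated.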
        if sharedsafe.name in removedreqs:
            ui.warn(
                _(
                    b'repository downgraded to not use share safe mode, '
                    b'existing shares will not work and need to'
                    b' be reshared.\n'
                )
            )
@@ -1,380 +1,455 b''
setup

  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > exp-share-safe = True
  > EOF

prepare source repo

  $ hg init source
  $ cd source
  $ cat .hg/requires
  exp-sharesafe
  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ echo a > a
  $ hg ci -Aqm "added a"
  $ echo b > b
  $ hg ci -Aqm "added b"

  $ HGEDITOR=cat hg config --shared
  abort: repository is not shared; can't use --shared
  [10]
  $ cd ..

Create a shared repo and check the requirements are shared and read correctly
  $ hg share source shared1
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd shared1
  $ cat .hg/requires
  exp-sharesafe
  shared

  $ hg debugrequirements -R ../source
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  shared
  sparserevlog
  store

  $ echo c > c
  $ hg ci -Aqm "added c"

Check that config of the source repository is also loaded

  $ hg showconfig ui.curses
  [1]

  $ echo "[ui]" >> ../source/.hg/hgrc
  $ echo "curses=true" >> ../source/.hg/hgrc

  $ hg showconfig ui.curses
  true

Test that extensions of source repository are also loaded

  $ hg debugextensions
  share
  $ hg extdiff -p echo
  hg: unknown command 'extdiff'
  'extdiff' is provided by the following extension:

      extdiff       command to allow external programs to compare revisions

  (use 'hg help extensions' for information on enabling extensions)
  [255]

  $ echo "[extensions]" >> ../source/.hg/hgrc
  $ echo "extdiff=" >> ../source/.hg/hgrc

  $ hg debugextensions -R ../source
  extdiff
  share
  $ hg extdiff -R ../source -p echo

BROKEN: the command below would not work if the config of the shared source
were not loaded on dispatch, yet debugextensions reports that the extension
is loaded
  $ hg debugextensions
  extdiff
  share

  $ hg extdiff -p echo

However, local .hg/hgrc should override the config set by share source

  $ echo "[ui]" >> .hg/hgrc
  $ echo "curses=false" >> .hg/hgrc

  $ hg showconfig ui.curses
  false

  $ HGEDITOR=cat hg config --shared
  [ui]
  curses=true
  [extensions]
  extdiff=

  $ HGEDITOR=cat hg config --local
  [ui]
  curses=false

Testing that hooks set in the source repository also run in the shared repo

  $ cd ../source
  $ cat <<EOF >> .hg/hgrc
  > [extensions]
  > hooklib=
  > [hooks]
  > pretxnchangegroup.reject_merge_commits = \
  > python:hgext.hooklib.reject_merge_commits.hook
  > EOF

  $ cd ..
  $ hg clone source cloned
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd cloned
  $ hg up 0
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo bar > bar
  $ hg ci -Aqm "added bar"
  $ hg merge
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m "merge commit"

  $ hg push ../source
  pushing to ../source
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]

Test that if the share source config is untrusted, we don't read it

  $ cd ../shared1

  $ cat << EOF > $TESTTMP/untrusted.py
  > from mercurial import scmutil, util
  > def uisetup(ui):
  >     class untrustedui(ui.__class__):
  >         def _trusted(self, fp, f):
  >             if util.normpath(fp.name).endswith(b'source/.hg/hgrc'):
  >                 return False
  >             return super(untrustedui, self)._trusted(fp, f)
  >     ui.__class__ = untrustedui
  > EOF

  $ hg showconfig hooks
  hooks.pretxnchangegroup.reject_merge_commits=python:hgext.hooklib.reject_merge_commits.hook

  $ hg showconfig hooks --config extensions.untrusted=$TESTTMP/untrusted.py
  [1]

Update the source repository format and check that the shared repo works

  $ cd ../source

Disable zstd related tests because it's not present in the pure version
#if zstd
  $ echo "[format]" >> .hg/hgrc
  $ echo "revlog-compression=zstd" >> .hg/hgrc

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store
     added: revlog-compression-zstd

  $ hg log -r .
  changeset:   1:5f6d8a4bf34a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added b

#endif
  $ echo "[format]" >> .hg/hgrc
  $ echo "use-persistent-nodemap=True" >> .hg/hgrc

  $ hg debugupgraderepo --run -q -R ../shared1
  abort: cannot upgrade repository; unsupported source requirement: shared
  [255]

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
     preserved: dotencode, exp-sharesafe, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
     added: persistent-nodemap

  $ hg log -r .
  changeset:   1:5f6d8a4bf34a
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added b


The shared one should work
  $ cd ../shared1
  $ hg log -r .
  changeset:   2:155349b645be
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     added c


Testing that hgrc-not-shared is loaded for the source and not for the shared repo

  $ cd ../source
  $ touch .hg/hgrc-not-shared
  $ echo "[ui]" >> .hg/hgrc-not-shared
  $ echo "traceback=true" >> .hg/hgrc-not-shared

  $ hg showconfig ui.traceback
  true

  $ HGEDITOR=cat hg config --non-shared
  [ui]
  traceback=true

  $ cd ../shared1
  $ hg showconfig ui.traceback
  [1]

Unsharing works

  $ hg unshare

Test that the source config is added to the shared one after unshare, and that
the current repo's config is still respected over the config that came from
the source
  $ cd ../cloned
  $ hg push ../shared1
  pushing to ../shared1
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  error: pretxnchangegroup.reject_merge_commits hook failed: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  transaction abort!
  rollback completed
  abort: bcde3522682d rejected as merge on the same branch. Please consider rebase.
  [255]
  $ hg showconfig ui.curses -R ../shared1
  false

  $ cd ../

Test that upgrading using debugupgraderepo works
=================================================

  $ hg init non-share-safe --config format.exp-share-safe=false
  $ cd non-share-safe
  $ hg debugrequirements
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store
  $ echo foo > foo
  $ hg ci -Aqm 'added foo'
  $ echo bar > bar
  $ hg ci -Aqm 'added bar'

Create a share before upgrading

  $ cd ..
  $ hg share non-share-safe nss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg debugrequirements -R nss-share
  dotencode
  fncache
  generaldelta
  revlogv1
  shared
  sparserevlog
  store
  $ cd non-share-safe

Upgrade

  $ hg debugupgraderepo -q
  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     added: exp-sharesafe

  $ hg debugupgraderepo --run -q
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     added: exp-sharesafe

  repository upgraded to share safe mode, existing shares will still work in old non-safe mode. Re-share existing shares to use them in safe mode. New shares will be created in safe mode.

  $ hg debugrequirements
  dotencode
  exp-sharesafe
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ cat .hg/requires
  exp-sharesafe

  $ cat .hg/store/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

Make sure existing shares still work

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo


Create a safe share from the upgraded one

  $ cd ..
  $ hg share non-share-safe ss-share
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd ss-share
  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ cd ../non-share-safe
395
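This ss-share is created while the source is in share-safe mode; it is used
at the end to show that downgrading the source breaks such shares until they
are recreated.
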
396 Test that downgrading works too
397
  $ cat >> $HGRCPATH <<EOF
  > [extensions]
  > share =
  > [format]
  > exp-share-safe = False
  > EOF
404
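The config above turns format.exp-share-safe back off, so the upgrade run
below computes exp-sharesafe as a removed requirement and takes the new
downgrade path.
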
  $ hg debugupgraderepo -q
  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     removed: exp-sharesafe

  $ hg debugupgraderepo -q --run
  upgrade will perform the following actions:

  requirements
     preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
     removed: exp-sharesafe

  repository downgraded to not use share safe mode, existing shares will not work and need to be reshared.

  $ hg debugrequirements
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ cat .hg/requires
  dotencode
  fncache
  generaldelta
  revlogv1
  sparserevlog
  store

  $ test -f .hg/store/requires
  [1]

  $ hg log -GT "{node}: {desc}\n"
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

Make sure existing shares still work

  $ hg log -GT "{node}: {desc}\n" -R ../nss-share
  @ f63db81e6dde1d9c78814167f77fb1fb49283f4f: added bar
  |
  o f3ba8b99bb6f897c87bbc1c07b75c6ddf43a4f77: added foo

  $ hg unshare -R ../nss-share

  $ hg log -GT "{node}: {desc}\n" -R ../ss-share
  abort: share source does not support exp-sharesafe requirement
  [255]