localrepo: move requirements constant to requirements module...
Pulkit Goyal
r45933:d7dcc75a default
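
This changeset moves the requirement constants (REVLOGV2_REQUIREMENT, SPARSEREVLOG_REQUIREMENT, SIDEDATA_REQUIREMENT, COPIESSDC_REQUIREMENT, NODEMAP_REQUIREMENT) out of localrepo.py into the requirements module, so call sites go through the existing `requirements as requirementsmod` import. The resulting access pattern, sketched on one of the touched lines:

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements                   # before
    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements   # after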
@@ -1,3534 +1,3513 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


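# A sketch of how these filecache classes are typically used elsewhere in
# this module (illustrative only; the decorated property below is a
# hypothetical example, not part of this changeset):
#
#     class localrepository(object):
#         @storecache(b'00changelog.i')
#         def changelog(self):
#             return self.store.changelog(...)
#
#     # isfilecached() can then be queried without triggering a load:
#     #     clobj, cached = isfilecached(repo, b'changelog')
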
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


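# Illustrative use of the decorator above (a hedged sketch; real callers in
# this module decorate destructive repo operations this way):
#
#     class localrepository(object):
#         @unfilteredmethod
#         def destroyed(self):
#             # always runs against the unfiltered repo
#             ...
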
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


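# Sketch of the executor protocol from a caller's perspective (assuming a
# `peer` object such as the localpeer defined below):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         node = f.result()  # resolved future; raises if the command failed
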
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


-# Increment the sub-version when the revlog v2 format changes to lock out old
-# clients.
-REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
-
-# A repository with the sparserevlog feature will have delta chains that
-# can spread over a larger span. Sparse reading cuts these large spans into
-# pieces, so that each piece isn't too big.
-# Without the sparserevlog capability, reading from the repository could use
-# huge amounts of memory, because the whole span would be read at once,
-# including all the intermediate revisions that aren't pertinent for the chain.
-# This is why once a repository has enabled sparse-read, it becomes required.
-SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
-
-# A repository with the sidedataflag requirement will allow to store extra
-# information for revision without altering their original hashes.
-SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
-
-# A repository with the the copies-sidedata-changeset requirement will store
-# copies related information in changeset's sidedata.
-COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
-
-# The repository use persistent nodemap for the changelog and the manifest.
-NODEMAP_REQUIREMENT = b'persistent-nodemap'
-
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


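# How an extension typically hooks in (a sketch; the feature name below is
# made up for illustration):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, features):
#         features.add(b'exp-somefeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
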
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if b'relshared' in requirements:
        sharedpath = hgvfs.join(sharedpath)

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements


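# For reference, a typical modern .hg/requires file contains one requirement
# per line; the exact contents vary by repo format, e.g.:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
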
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, the vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    shared = b'shared' in requirements or b'relshared' in requirements
    if shared:
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False


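# Extensions generally monkeypatch via extensions.wrapfunction; a sketch
# (the extra config file read here is purely illustrative):
#
#     def _wrappedloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             loaded = True
#         except IOError:
#             pass
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _wrappedloadhgrc)
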
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


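# Worked example of the loop above: an install where the zstd engine is
# available and usable for revlogs ends up with both b'exp-compression-zstd'
# and b'revlog-compression-zstd' in the supported set; engines without a
# revlog header contribute nothing.
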
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


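# Decision table implied by makestore() above (sketch):
#
#     requirements present           -> store implementation
#     store + fncache (+ dotencode)  -> storemod.fncachestore
#     store only                     -> storemod.encodedstore
#     neither                        -> storemod.basicstore (oldest layout)
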
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
-    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
+    if (
+        b'revlogv1' in requirements
+        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
+    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

-    if COPIESSDC_REQUIREMENT in requirements:
+    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
-    if REVLOGV2_REQUIREMENT in requirements:
+    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

934 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
913 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
935 options[b'sparse-revlog'] = sparserevlog
914 options[b'sparse-revlog'] = sparserevlog
936 if sparserevlog:
915 if sparserevlog:
937 options[b'generaldelta'] = True
916 options[b'generaldelta'] = True
938
917
939 sidedata = SIDEDATA_REQUIREMENT in requirements
918 sidedata = requirementsmod.SIDEDATA_REQUIREMENT in requirements
940 options[b'side-data'] = sidedata
919 options[b'side-data'] = sidedata
941
920
942 maxchainlen = None
921 maxchainlen = None
943 if sparserevlog:
922 if sparserevlog:
944 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
923 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
945 # experimental config: format.maxchainlen
924 # experimental config: format.maxchainlen
946 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
925 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
947 if maxchainlen is not None:
926 if maxchainlen is not None:
948 options[b'maxchainlen'] = maxchainlen
927 options[b'maxchainlen'] = maxchainlen
949
928
950 for r in requirements:
929 for r in requirements:
951 # we allow multiple compression engine requirement to co-exist because
930 # we allow multiple compression engine requirement to co-exist because
952 # strickly speaking, revlog seems to support mixed compression style.
931 # strickly speaking, revlog seems to support mixed compression style.
953 #
932 #
954 # The compression used for new entries will be "the last one"
933 # The compression used for new entries will be "the last one"
955 prefix = r.startswith
934 prefix = r.startswith
956 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
935 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
957 options[b'compengine'] = r.split(b'-', 2)[2]
936 options[b'compengine'] = r.split(b'-', 2)[2]
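
        # For example (illustrative requirement name), a requirement such as
        # b'revlog-compression-zstd' yields:
        #
        #   b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
        #
        # so the engine named by the last matching requirement wins.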

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
-    if NODEMAP_REQUIREMENT in requirements:
+    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options
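
    # A minimal usage sketch (hypothetical names for the consuming vfs): the
    # resolved mapping is typically attached to the store vfs so the revlog
    # constructors can read it later:
    #
    #   opts = resolverevlogstorevfsoptions(ui, requirements, features)
    #   storevfs.options = opts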


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
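
# A small sketch of the selection logic (hypothetical requirement sets):
#
#   makefilestorage({b'store'}, set())
#   # -> revlogfilestorage
#   makefilestorage({requirementsmod.NARROW_REQUIREMENT}, set())
#   # -> revlognarrowfilestorage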


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
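
# Roughly, the derivation looks like this (a simplified sketch; the real
# ``makelocalrepository()`` passes many more keyword arguments and extra
# state between the factories):
#
#   bases = [fn()(requirements=requirements, features=features)
#            for iface, fn in REPO_INTERFACES]
#   cls = type('derivedrepo', tuple(reversed(bases)), {})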


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        requirementsmod.TREEMANIFEST_REQUIREMENT,
-        COPIESSDC_REQUIREMENT,
+        requirementsmod.COPIESSDC_REQUIREMENT,
-        REVLOGV2_REQUIREMENT,
+        requirementsmod.REVLOGV2_REQUIREMENT,
-        SIDEDATA_REQUIREMENT,
+        requirementsmod.SIDEDATA_REQUIREMENT,
-        SPARSEREVLOG_REQUIREMENT,
+        requirementsmod.SPARSEREVLOG_REQUIREMENT,
-        NODEMAP_REQUIREMENT,
+        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }
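
    # A third-party extension that manages its own lock-free state file
    # could extend the set above, e.g. (hypothetical file name):
    #
    #   localrepo.localrepository._wlockfreeprefix.add(b'myext.state')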

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
-        if COPIESSDC_REQUIREMENT in self.requirements:
+        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'
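
        # For illustration, ``self.requirements`` for a repository created
        # with default modern settings would contain something along the
        # lines of (non-exhaustive sketch):
        #
        #   {b'revlogv1', b'generaldelta', b'store', b'fncache',
        #    b'dotencode', b'sparserevlog'}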

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs
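
        # For example, with ``devel.check-locks`` enabled, writing
        # ``.hg/bookmarks`` without holding ``wlock`` would produce a
        # develwarn along the lines of (illustrative output):
        #
        #   devel-warn: write with no wlock: "bookmarks"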

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
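
        # Usage sketch: filtering is non-recursive, so both of the calls
        # below yield a view using the "served" filter:
        #
        #   repo.filtered(b'served')
        #   repo.filtered(b'visible').filtered(b'served')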

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) an outside transaction updates the changelog to content B
        # 3) an outside transaction updates the bookmark file, referring to
        #    content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the
        #    data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid a race; see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
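
        # Usage sketch (hypothetical matcher ``m``): callers restrict an
        # arbitrary matcher to the narrowspec while keeping explicit paths
        # visible for later warnings:
        #
        #   m = repo.narrowmatch(m, includeexact=True)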

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping
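
        # Shape of the resulting mapping, for illustration (``p1rev`` and
        # ``p1node`` stand for the first working copy parent):
        #
        #   {b'null': (nullrev, nullid), nullrev: (...), nullid: (...),
        #    b'.': (p1rev, p1node), p1rev: (...), p1node: (...)}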

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
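
        # Accepted ``changeid`` forms, for illustration:
        #
        #   repo[None]       # working directory context
        #   repo[42]         # integer revision number
        #   repo[b'tip']     # special symbol
        #   repo[b'.']       # working directory parent
        #   repo[node]       # 20-byte binary or 40-byte hex nodeid
        #   repo[0:3]        # slice; filtered revisions are skipped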

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
1748
1727
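    # Illustrative sketch (not part of the original source): the %-formatting
    # accepted by ``revs()`` goes through revsetlang.formatspec, so values can
    # be spliced into the expression without manual quoting, e.g.:
    #
    #     for rev in repo.revs(b'ancestors(%d) and not public()', 42):
    #         node = repo.changelog.node(rev)
    #
    # The revision number 42 is an arbitrary example value.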
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

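    # Illustrative sketch (not part of the original source): ``localalias``
    # lets a caller pin an alias definition even if the user configured one of
    # the same name, e.g.:
    #
    #     revs = repo.anyrevs(
    #         [b'mine()'],
    #         user=True,
    #         localalias={b'mine': b'author(alice) and draft()'},
    #     )
    #
    # The alias name ``mine`` and its definition are made-up examples.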
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

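    # Illustrative sketch (not part of the original source): consumers of the
    # mapping returned by ``tags()`` typically resolve nodes back to revs:
    #
    #     for tag, node in sorted(repo.tags().items()):
    #         rev = repo.changelog.rev(node)
    #
    # Entries pointing at unknown nodes were already filtered out above, so
    # the rev() call here cannot fail.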
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

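    # Illustrative sketch (not part of the original source): the reverse
    # lookups above make it cheap to label a changeset:
    #
    #     node = repo[b'tip'].node()
    #     labels = repo.nodetags(node) + repo.nodebookmarks(node)
    #
    # Both calls return (possibly empty) lists, so concatenation is safe.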
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

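    # Illustrative sketch (not part of the original source): with
    # ``ignoremissing=True`` the implicit ``None`` return doubles as a
    # "branch absent" signal:
    #
    #     node = repo.branchtip(b'stable', ignoremissing=True)
    #     if node is None:
    #         ...  # no 'stable' branch in this repository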
    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

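    # Illustrative sketch (not part of the original source): ``known()`` is a
    # batch membership test, typically fed nodes advertised by a peer:
    #
    #     flags = repo.known(remote_nodes)  # remote_nodes: hypothetical list
    #     # flags[i] is True iff remote_nodes[i] is present and not filtered
    #
    # The result preserves the input order, one boolean per node.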
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

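    # Illustrative sketch (not part of the original source): a round trip
    # through the filter machinery, assuming ``path`` names a tracked file:
    #
    #     data = repo.wread(path)       # applies the 'encode' filters
    #     repo.wwrite(path, data, b'')  # applies the 'decode' filters
    #
    # Flags follow the manifest convention: b'l' writes a symlink (in which
    # case the exec bit is not touched), b'x' marks a regular file executable.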
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with the case where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

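    # Illustrative sketch (not part of the original source): the canonical
    # calling pattern, matching the 'transaction requires locking' check
    # above; transaction objects can be used as context managers:
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store; the transaction aborts on exception
    #
    # The description b'my-operation' is a made-up example; it is exposed to
    # hooks via the txnname hook argument set above.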
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

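    # Illustrative sketch (not part of the original source): the journal/undo
    # naming convention pairs each journal file with its post-transaction
    # undo counterpart:
    #
    #     for vfs, name in repo.undofiles():
    #         present = vfs.exists(name)  # e.g. 'undo.dirstate', 'undo.branch'
    #
    # ``undoname()`` (defined elsewhere in this module) performs the
    # journal.* -> undo.* name mapping used above.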
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

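    # Illustrative sketch (not part of the original source): an extension
    # that wants to warm its own caches could wrap this method; the wrapper
    # below is hypothetical extension code using extensions.wrapfunction:
    #
    #     def _buildcacheupdater(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #
    #         def wrapped(tr):
    #             updater(tr)
    #             ...  # warm extension-specific caches here
    #
    #         return wrapped
    #
    #     extensions.wrapfunction(
    #         localrepo.localrepository, '_buildcacheupdater', _buildcacheupdater
    #     )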
2529 @unfilteredmethod
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't
            # ever have been written, even if they're a subset of another
            # kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

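    # A hedged usage sketch (`repo` is assumed to be a localrepository
    # instance obtained elsewhere): a caller holding the store lock can warm
    # everything eagerly, e.g.
    #
    #     with repo.lock():
    #         repo.updatecaches(full=True)
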
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but a
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

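    # A hedged sketch of registering an unlock callback (the callback below
    # is hypothetical, not from this module):
    #
    #     def _notify(success):
    #         repo.ui.note(b'repository fully unlocked\n')
    #
    #     repo._afterlock(_notify)
    #
    # If no lock is currently held, the callback fires immediately with
    # True, per the for/else fallthrough above.
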
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

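    # A hedged usage sketch for the two locks (`repo` is assumed to be a
    # localrepository instance): callers needing both locks take 'wlock'
    # first, as the docstrings above require, e.g.
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'example') as tr:
    #             pass  # mutate the store and .hg metadata here
    #
    # commit() below follows exactly this pattern. The repo itself keeps
    # only weak references (_wlockref/_lockref), so a lock is released once
    # its last strong holder goes away.
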
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

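    # A minimal programmatic commit, as a hedged sketch (assumes `repo` was
    # opened elsewhere and the working directory has pending changes; the
    # message and user strings are placeholders):
    #
    #     node = repo.commit(
    #         text=b'example: fix typo',
    #         user=b'Example <example@example.com>',
    #     )
    #     if node is None:
    #         pass  # nothing to commit
    #
    # The returned value is the new changeset's node, or None when the
    # commit would be empty and ui.allowemptycommit is off.
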
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

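    # How the sampling in between() behaves: the walk follows first parents
    # from `top` towards `bottom`, recording the ancestors at distances
    # 1, 2, 4, 8, ... (`f` doubles after each recorded node), so a chain of
    # length N yields about log2(N) nodes. A hedged worked example with
    # hypothetical nodes: if a's first parent is b, b's is c, and so on down
    # to f, then between([(a, f)]) records b (distance 1), c (distance 2)
    # and e (distance 4).
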
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks which are called with a
        pushop (providing repo, remote and outgoing) before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

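    # A hedged sketch of a pushkey round trip (the key and `newnode` are
    # illustrative; 'bookmarks' is a commonly registered namespace):
    #
    #     old = repo.listkeys(b'bookmarks').get(b'@', b'')
    #     ok = repo.pushkey(b'bookmarks', b'@', old, hex(newnode))
    #
    # pushkey() fires the prepushkey hook first and returns False if a hook
    # aborts; otherwise the pushkey hook runs once the locks are released.
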
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


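# A hedged sketch of opening or creating a repository via instance() (the
# path is a placeholder; `uimod` is mercurial.ui):
#
#     from mercurial import ui as uimod
#
#     u = uimod.ui.load()
#     repo = instance(u, b'/tmp/example-repo', create=True)
#
# Creation funnels through createrepository() below before the repository
# is opened with makelocalrepository().

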
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
-        requirements.add(SPARSEREVLOG_REQUIREMENT)
+        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
-        requirements.add(SIDEDATA_REQUIREMENT)
+        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
-        requirements.add(SIDEDATA_REQUIREMENT)
-        requirements.add(COPIESSDC_REQUIREMENT)
+        requirements.add(requirementsmod.SIDEDATA_REQUIREMENT)
+        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
-        requirements.add(REVLOGV2_REQUIREMENT)
+        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
-        requirements.add(NODEMAP_REQUIREMENT)
+        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    return requirements


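# Extensions wrap newreporequirements() to inject their own requirements,
# as the docstring above notes. A hedged sketch (the wrapper and the
# requirement name are hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         reqs.add(b'exp-myextension')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )

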
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if b'shared' in requirements or b'relshared' in requirements:
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


3405 def createrepository(ui, path, createopts=None):
3384 def createrepository(ui, path, createopts=None):
3406 """Create a new repository in a vfs.
3385 """Create a new repository in a vfs.
3407
3386
3408 ``path`` path to the new repo's working directory.
3387 ``path`` path to the new repo's working directory.
3409 ``createopts`` options for the new repository.
3388 ``createopts`` options for the new repository.
3410
3389
3411 The following keys for ``createopts`` are recognized:
3390 The following keys for ``createopts`` are recognized:
3412
3391
3413 backend
3392 backend
3414 The storage backend to use.
3393 The storage backend to use.
3415 lfs
3394 lfs
3416 Repository will be created with ``lfs`` requirement. The lfs extension
3395 Repository will be created with ``lfs`` requirement. The lfs extension
3417 will automatically be loaded when the repository is accessed.
3396 will automatically be loaded when the repository is accessed.
3418 narrowfiles
3397 narrowfiles
3419 Set up repository to support narrow file storage.
3398 Set up repository to support narrow file storage.
3420 sharedrepo
3399 sharedrepo
3421 Repository object from which storage should be shared.
3400 Repository object from which storage should be shared.
3422 sharedrelative
3401 sharedrelative
3423 Boolean indicating if the path to the shared repo should be
3402 Boolean indicating if the path to the shared repo should be
3424 stored as relative. By default, the pointer to the "parent" repo
3403 stored as relative. By default, the pointer to the "parent" repo
3425 is stored as an absolute path.
3404 is stored as an absolute path.
3426 shareditems
3405 shareditems
3427 Set of items to share to the new repository (in addition to storage).
3406 Set of items to share to the new repository (in addition to storage).
3428 shallowfilestore
3407 shallowfilestore
3429 Indicates that storage for files should be shallow (not all ancestor
3408 Indicates that storage for files should be shallow (not all ancestor
3430 revisions are known).
3409 revisions are known).
3431 """
3410 """
3432 createopts = defaultcreateopts(ui, createopts=createopts)
3411 createopts = defaultcreateopts(ui, createopts=createopts)
3433
3412
3434 unknownopts = filterknowncreateopts(ui, createopts)
3413 unknownopts = filterknowncreateopts(ui, createopts)
3435
3414
3436 if not isinstance(unknownopts, dict):
3415 if not isinstance(unknownopts, dict):
3437 raise error.ProgrammingError(
3416 raise error.ProgrammingError(
3438 b'filterknowncreateopts() did not return a dict'
3417 b'filterknowncreateopts() did not return a dict'
3439 )
3418 )
3440
3419
3441 if unknownopts:
3420 if unknownopts:
3442 raise error.Abort(
3421 raise error.Abort(
3443 _(
3422 _(
3444 b'unable to create repository because of unknown '
3423 b'unable to create repository because of unknown '
3445 b'creation option: %s'
3424 b'creation option: %s'
3446 )
3425 )
3447 % b', '.join(sorted(unknownopts)),
3426 % b', '.join(sorted(unknownopts)),
3448 hint=_(b'is a required extension not loaded?'),
3427 hint=_(b'is a required extension not loaded?'),
3449 )
3428 )
3450
3429
3451 requirements = newreporequirements(ui, createopts=createopts)
3430 requirements = newreporequirements(ui, createopts=createopts)
3452 requirements -= checkrequirementscompat(ui, requirements)
3431 requirements -= checkrequirementscompat(ui, requirements)
3453
3432
3454 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3433 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3455
3434
3456 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3435 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3457 if hgvfs.exists():
3436 if hgvfs.exists():
3458 raise error.RepoError(_(b'repository %s already exists') % path)
3437 raise error.RepoError(_(b'repository %s already exists') % path)
3459
3438
3460 if b'sharedrepo' in createopts:
3439 if b'sharedrepo' in createopts:
3461 sharedpath = createopts[b'sharedrepo'].sharedpath
3440 sharedpath = createopts[b'sharedrepo'].sharedpath
3462
3441
3463 if createopts.get(b'sharedrelative'):
3442 if createopts.get(b'sharedrelative'):
3464 try:
3443 try:
3465 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3444 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3466 except (IOError, ValueError) as e:
3445 except (IOError, ValueError) as e:
3467 # ValueError is raised on Windows if the drive letters differ
3446 # ValueError is raised on Windows if the drive letters differ
3468 # on each path.
3447 # on each path.
3469 raise error.Abort(
3448 raise error.Abort(
3470 _(b'cannot calculate relative path'),
3449 _(b'cannot calculate relative path'),
3471 hint=stringutil.forcebytestr(e),
3450 hint=stringutil.forcebytestr(e),
3472 )
3451 )
3473
3452
3474 if not wdirvfs.exists():
3453 if not wdirvfs.exists():
3475 wdirvfs.makedirs()
3454 wdirvfs.makedirs()
3476
3455
3477 hgvfs.makedir(notindexed=True)
3456 hgvfs.makedir(notindexed=True)
3478 if b'sharedrepo' not in createopts:
3457 if b'sharedrepo' not in createopts:
3479 hgvfs.mkdir(b'cache')
3458 hgvfs.mkdir(b'cache')
3480 hgvfs.mkdir(b'wcache')
3459 hgvfs.mkdir(b'wcache')
3481
3460
3482 if b'store' in requirements and b'sharedrepo' not in createopts:
3461 if b'store' in requirements and b'sharedrepo' not in createopts:
3483 hgvfs.mkdir(b'store')
3462 hgvfs.mkdir(b'store')
3484
3463
3485 # We create an invalid changelog outside the store so very old
3464 # We create an invalid changelog outside the store so very old
3486 # Mercurial versions (which didn't know about the requirements
3465 # Mercurial versions (which didn't know about the requirements
3487 # file) encounter an error on reading the changelog. This
3466 # file) encounter an error on reading the changelog. This
3488 # effectively locks out old clients and prevents them from
3467 # effectively locks out old clients and prevents them from
3489 # mucking with a repo in an unknown format.
3468 # mucking with a repo in an unknown format.
3490 #
3469 #
3491 # The revlog header has version 2, which won't be recognized by
3470 # The revlog header has version 2, which won't be recognized by
3492 # such old clients.
3471 # such old clients.
3493 hgvfs.append(
3472 hgvfs.append(
3494 b'00changelog.i',
3473 b'00changelog.i',
3495 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3474 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3496 b'layout',
3475 b'layout',
3497 )
3476 )
3498
3477
3499 scmutil.writerequires(hgvfs, requirements)
3478 scmutil.writerequires(hgvfs, requirements)
3500
3479
3501 # Write out file telling readers where to find the shared store.
3480 # Write out file telling readers where to find the shared store.
3502 if b'sharedrepo' in createopts:
3481 if b'sharedrepo' in createopts:
3503 hgvfs.write(b'sharedpath', sharedpath)
3482 hgvfs.write(b'sharedpath', sharedpath)
3504
3483
3505 if createopts.get(b'shareditems'):
3484 if createopts.get(b'shareditems'):
3506 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3485 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3507 hgvfs.write(b'shared', shared)
3486 hgvfs.write(b'shared', shared)
3508
3487
3509
3488
3510 def poisonrepository(repo):
3489 def poisonrepository(repo):
3511 """Poison a repository instance so it can no longer be used."""
3490 """Poison a repository instance so it can no longer be used."""
3512 # Perform any cleanup on the instance.
3491 # Perform any cleanup on the instance.
3513 repo.close()
3492 repo.close()
3514
3493
3515 # Our strategy is to replace the type of the object with one that
3494 # Our strategy is to replace the type of the object with one that
3516 # raises an error on every attribute lookup.
3495 # raises an error on every attribute lookup.
3517 #
3496 #
3518 # But we have to allow the close() method because some constructors
3497 # But we have to allow the close() method because some constructors
3519 # of repos call close() on repo references.
3498 # of repos call close() on repo references.
3520 class poisonedrepository(object):
3499 class poisonedrepository(object):
3521 def __getattribute__(self, item):
3500 def __getattribute__(self, item):
3522 if item == 'close':
3501 if item == 'close':
3523 return object.__getattribute__(self, item)
3502 return object.__getattribute__(self, item)
3524
3503
3525 raise error.ProgrammingError(
3504 raise error.ProgrammingError(
3526 b'repo instances should not be used after unshare'
3505 b'repo instances should not be used after unshare'
3527 )
3506 )
3528
3507
3529 def close(self):
3508 def close(self):
3530 pass
3509 pass
3531
3510
3532 # We may have a repoview, which intercepts __setattr__. So be sure
3511 # We may have a repoview, which intercepts __setattr__. So be sure
3533 # we operate at the lowest level possible.
3512 # we operate at the lowest level possible.
3534 object.__setattr__(repo, '__class__', poisonedrepository)
3513 object.__setattr__(repo, '__class__', poisonedrepository)
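A brief usage sketch (illustrative, not a call site in this change): once poisoned, only ``close()`` keeps working on the stale handle.

    poisonrepository(repo)  # 'repo' is any localrepo instance being retired
    repo.close()            # still allowed; close() is special-cased above
    repo.changelog          # any other attribute raises error.ProgrammingError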
@@ -1,22 +1,46 b''
1 # requirements.py - objects and functions related to repository requirements
1 # requirements.py - objects and functions related to repository requirements
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 # When narrowing is finalized and no longer subject to format changes,
10 # When narrowing is finalized and no longer subject to format changes,
11 # we should move this to just "narrow" or similar.
11 # we should move this to just "narrow" or similar.
12 NARROW_REQUIREMENT = b'narrowhg-experimental'
12 NARROW_REQUIREMENT = b'narrowhg-experimental'
13
13
14 # Enables sparse working directory usage
14 # Enables sparse working directory usage
15 SPARSE_REQUIREMENT = b'exp-sparse'
15 SPARSE_REQUIREMENT = b'exp-sparse'
16
16
17 # Enables the internal phase which is used to hide changesets instead
17 # Enables the internal phase which is used to hide changesets instead
18 # of stripping them
18 # of stripping them
19 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
19 INTERNAL_PHASE_REQUIREMENT = b'internal-phase'
20
20
21 # Stores manifest in Tree structure
21 # Stores manifest in Tree structure
22 TREEMANIFEST_REQUIREMENT = b'treemanifest'
22 TREEMANIFEST_REQUIREMENT = b'treemanifest'
23
24 # Increment the sub-version when the revlog v2 format changes to lock out old
25 # clients.
26 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
27
28 # A repository with the sparserevlog feature will have delta chains that
29 # can spread over a larger span. Sparse reading cuts these large spans into
30 # pieces, so that each piece isn't too big.
31 # Without the sparserevlog capability, reading from the repository could use
32 # huge amounts of memory, because the whole span would be read at once,
33 # including all the intermediate revisions that aren't pertinent for the chain.
34 # This is why once a repository has enabled sparse-read, it becomes required.
35 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
36
37 # A repository with the sidedata-flag requirement allows storing extra
38 # information for revisions without altering their original hashes.
39 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
40
41 # A repository with the copies-sidedata-changeset requirement will store
42 # copies related information in changeset's sidedata.
43 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
44
45 # The repository uses a persistent nodemap for the changelog and the manifest.
46 NODEMAP_REQUIREMENT = b'persistent-nodemap'
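Each of these constants is a plain byte string tested for membership against ``repo.requirements`` (the parsed ``.hg/requires`` file). A hedged sketch of the consuming pattern, with a hypothetical helper name:

    from mercurial import requirements

    def uses_sidedata(repo):
        # a feature is active exactly when its requirement string is
        # recorded in the repository's requirements set
        return requirements.SIDEDATA_REQUIREMENT in repo.requirements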
@@ -1,1432 +1,1432 b''
1 # upgrade.py - functions for in place upgrade of Mercurial repository
1 # upgrade.py - functions for in place upgrade of Mercurial repository
2 #
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
3 # Copyright (c) 2016-present, Gregory Szorc
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import stat
10 import stat
11
11
12 from .i18n import _
12 from .i18n import _
13 from .pycompat import getattr
13 from .pycompat import getattr
14 from . import (
14 from . import (
15 changelog,
15 changelog,
16 error,
16 error,
17 filelog,
17 filelog,
18 hg,
18 hg,
19 localrepo,
19 localrepo,
20 manifest,
20 manifest,
21 metadata,
21 metadata,
22 pycompat,
22 pycompat,
23 requirements,
23 requirements,
24 revlog,
24 revlog,
25 scmutil,
25 scmutil,
26 util,
26 util,
27 vfs as vfsmod,
27 vfs as vfsmod,
28 )
28 )
29
29
30 from .utils import compression
30 from .utils import compression
31
31
32 # list of requirements that request a clone of all revlogs if added/removed
32 # list of requirements that request a clone of all revlogs if added/removed
33 RECLONES_REQUIREMENTS = {
33 RECLONES_REQUIREMENTS = {
34 b'generaldelta',
34 b'generaldelta',
35 localrepo.SPARSEREVLOG_REQUIREMENT,
35 requirements.SPARSEREVLOG_REQUIREMENT,
36 }
36 }
37
37
38
38
39 def requiredsourcerequirements(repo):
39 def requiredsourcerequirements(repo):
40 """Obtain requirements required to be present to upgrade a repo.
40 """Obtain requirements required to be present to upgrade a repo.
41
41
42 An upgrade will not be allowed if the repository doesn't have the
42 An upgrade will not be allowed if the repository doesn't have the
43 requirements returned by this function.
43 requirements returned by this function.
44 """
44 """
45 return {
45 return {
46 # Introduced in Mercurial 0.9.2.
46 # Introduced in Mercurial 0.9.2.
47 b'revlogv1',
47 b'revlogv1',
48 # Introduced in Mercurial 0.9.2.
48 # Introduced in Mercurial 0.9.2.
49 b'store',
49 b'store',
50 }
50 }
51
51
52
52
53 def blocksourcerequirements(repo):
53 def blocksourcerequirements(repo):
54 """Obtain requirements that will prevent an upgrade from occurring.
54 """Obtain requirements that will prevent an upgrade from occurring.
55
55
56 An upgrade cannot be performed if the source repository contains a
56 An upgrade cannot be performed if the source repository contains a
57 requirement in the returned set.
57 requirement in the returned set.
58 """
58 """
59 return {
59 return {
60 # The upgrade code does not yet support these experimental features.
60 # The upgrade code does not yet support these experimental features.
61 # This is an artificial limitation.
61 # This is an artificial limitation.
62 requirements.TREEMANIFEST_REQUIREMENT,
62 requirements.TREEMANIFEST_REQUIREMENT,
63 # This was a precursor to generaldelta and was never enabled by default.
63 # This was a precursor to generaldelta and was never enabled by default.
64 # It should (hopefully) not exist in the wild.
64 # It should (hopefully) not exist in the wild.
65 b'parentdelta',
65 b'parentdelta',
66 # Upgrade should operate on the actual store, not the shared link.
66 # Upgrade should operate on the actual store, not the shared link.
67 b'shared',
67 b'shared',
68 }
68 }
69
69
70
70
71 def supportremovedrequirements(repo):
71 def supportremovedrequirements(repo):
72 """Obtain requirements that can be removed during an upgrade.
72 """Obtain requirements that can be removed during an upgrade.
73
73
74 If an upgrade were to create a repository that dropped a requirement,
74 If an upgrade were to create a repository that dropped a requirement,
75 the dropped requirement must appear in the returned set for the upgrade
75 the dropped requirement must appear in the returned set for the upgrade
76 to be allowed.
76 to be allowed.
77 """
77 """
78 supported = {
78 supported = {
79 localrepo.SPARSEREVLOG_REQUIREMENT,
79 requirements.SPARSEREVLOG_REQUIREMENT,
80 localrepo.SIDEDATA_REQUIREMENT,
80 requirements.SIDEDATA_REQUIREMENT,
81 localrepo.COPIESSDC_REQUIREMENT,
81 requirements.COPIESSDC_REQUIREMENT,
82 localrepo.NODEMAP_REQUIREMENT,
82 requirements.NODEMAP_REQUIREMENT,
83 }
83 }
84 for name in compression.compengines:
84 for name in compression.compengines:
85 engine = compression.compengines[name]
85 engine = compression.compengines[name]
86 if engine.available() and engine.revlogheader():
86 if engine.available() and engine.revlogheader():
87 supported.add(b'exp-compression-%s' % name)
87 supported.add(b'exp-compression-%s' % name)
88 if engine.name() == b'zstd':
88 if engine.name() == b'zstd':
89 supported.add(b'revlog-compression-zstd')
89 supported.add(b'revlog-compression-zstd')
90 return supported
90 return supported
91
91
92
92
93 def supporteddestrequirements(repo):
93 def supporteddestrequirements(repo):
94 """Obtain requirements that upgrade supports in the destination.
94 """Obtain requirements that upgrade supports in the destination.
95
95
96 If the result of the upgrade would create requirements not in this set,
96 If the result of the upgrade would create requirements not in this set,
97 the upgrade is disallowed.
97 the upgrade is disallowed.
98
98
99 Extensions should monkeypatch this to add their custom requirements.
99 Extensions should monkeypatch this to add their custom requirements.
100 """
100 """
101 supported = {
101 supported = {
102 b'dotencode',
102 b'dotencode',
103 b'fncache',
103 b'fncache',
104 b'generaldelta',
104 b'generaldelta',
105 b'revlogv1',
105 b'revlogv1',
106 b'store',
106 b'store',
107 localrepo.SPARSEREVLOG_REQUIREMENT,
107 requirements.SPARSEREVLOG_REQUIREMENT,
108 localrepo.SIDEDATA_REQUIREMENT,
108 requirements.SIDEDATA_REQUIREMENT,
109 localrepo.COPIESSDC_REQUIREMENT,
109 requirements.COPIESSDC_REQUIREMENT,
110 localrepo.NODEMAP_REQUIREMENT,
110 requirements.NODEMAP_REQUIREMENT,
111 }
111 }
112 for name in compression.compengines:
112 for name in compression.compengines:
113 engine = compression.compengines[name]
113 engine = compression.compengines[name]
114 if engine.available() and engine.revlogheader():
114 if engine.available() and engine.revlogheader():
115 supported.add(b'exp-compression-%s' % name)
115 supported.add(b'exp-compression-%s' % name)
116 if engine.name() == b'zstd':
116 if engine.name() == b'zstd':
117 supported.add(b'revlog-compression-zstd')
117 supported.add(b'revlog-compression-zstd')
118 return supported
118 return supported
119
119
120
120
121 def allowednewrequirements(repo):
121 def allowednewrequirements(repo):
122 """Obtain requirements that can be added to a repository during upgrade.
122 """Obtain requirements that can be added to a repository during upgrade.
123
123
124 This is used to disallow proposed requirements from being added when
124 This is used to disallow proposed requirements from being added when
125 they weren't present before.
125 they weren't present before.
126
126
127 We use a list of allowed requirement additions instead of a list of known
127 We use a list of allowed requirement additions instead of a list of known
128 bad additions because the whitelist approach is safer and will prevent
128 bad additions because the whitelist approach is safer and will prevent
129 future, unknown requirements from accidentally being added.
129 future, unknown requirements from accidentally being added.
130 """
130 """
131 supported = {
131 supported = {
132 b'dotencode',
132 b'dotencode',
133 b'fncache',
133 b'fncache',
134 b'generaldelta',
134 b'generaldelta',
135 localrepo.SPARSEREVLOG_REQUIREMENT,
135 requirements.SPARSEREVLOG_REQUIREMENT,
136 localrepo.SIDEDATA_REQUIREMENT,
136 requirements.SIDEDATA_REQUIREMENT,
137 localrepo.COPIESSDC_REQUIREMENT,
137 requirements.COPIESSDC_REQUIREMENT,
138 localrepo.NODEMAP_REQUIREMENT,
138 requirements.NODEMAP_REQUIREMENT,
139 }
139 }
140 for name in compression.compengines:
140 for name in compression.compengines:
141 engine = compression.compengines[name]
141 engine = compression.compengines[name]
142 if engine.available() and engine.revlogheader():
142 if engine.available() and engine.revlogheader():
143 supported.add(b'exp-compression-%s' % name)
143 supported.add(b'exp-compression-%s' % name)
144 if engine.name() == b'zstd':
144 if engine.name() == b'zstd':
145 supported.add(b'revlog-compression-zstd')
145 supported.add(b'revlog-compression-zstd')
146 return supported
146 return supported
147
147
148
148
149 def preservedrequirements(repo):
149 def preservedrequirements(repo):
150 return set()
150 return set()
151
151
152
152
153 deficiency = b'deficiency'
153 deficiency = b'deficiency'
154 optimisation = b'optimization'
154 optimisation = b'optimization'
155
155
156
156
157 class improvement(object):
157 class improvement(object):
158 """Represents an improvement that can be made as part of an upgrade.
158 """Represents an improvement that can be made as part of an upgrade.
159
159
160 The following attributes are defined on each instance:
160 The following attributes are defined on each instance:
161
161
162 name
162 name
163 Machine-readable string uniquely identifying this improvement. It
163 Machine-readable string uniquely identifying this improvement. It
164 will be mapped to an action later in the upgrade process.
164 will be mapped to an action later in the upgrade process.
165
165
166 type
166 type
167 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
167 Either ``deficiency`` or ``optimisation``. A deficiency is an obvious
168 problem. An optimization is an action (sometimes optional) that
168 problem. An optimization is an action (sometimes optional) that
169 can be taken to further improve the state of the repository.
169 can be taken to further improve the state of the repository.
170
170
171 description
171 description
172 Message intended for humans explaining the improvement in more detail,
172 Message intended for humans explaining the improvement in more detail,
173 including the implications of it. For ``deficiency`` types, should be
173 including the implications of it. For ``deficiency`` types, should be
174 worded in the present tense. For ``optimisation`` types, should be
174 worded in the present tense. For ``optimisation`` types, should be
175 worded in the future tense.
175 worded in the future tense.
176
176
177 upgrademessage
177 upgrademessage
178 Message intended for humans explaining what an upgrade addressing this
178 Message intended for humans explaining what an upgrade addressing this
179 issue will do. Should be worded in the future tense.
179 issue will do. Should be worded in the future tense.
180 """
180 """
181
181
182 def __init__(self, name, type, description, upgrademessage):
182 def __init__(self, name, type, description, upgrademessage):
183 self.name = name
183 self.name = name
184 self.type = type
184 self.type = type
185 self.description = description
185 self.description = description
186 self.upgrademessage = upgrademessage
186 self.upgrademessage = upgrademessage
187
187
188 def __eq__(self, other):
188 def __eq__(self, other):
189 if not isinstance(other, improvement):
189 if not isinstance(other, improvement):
190 # This is what Python tells us to do
190 # This is what Python tells us to do
191 return NotImplemented
191 return NotImplemented
192 return self.name == other.name
192 return self.name == other.name
193
193
194 def __ne__(self, other):
194 def __ne__(self, other):
195 return not (self == other)
195 return not (self == other)
196
196
197 def __hash__(self):
197 def __hash__(self):
198 return hash(self.name)
198 return hash(self.name)
199
199
200
200
201 allformatvariant = []
201 allformatvariant = []
202
202
203
203
204 def registerformatvariant(cls):
204 def registerformatvariant(cls):
205 allformatvariant.append(cls)
205 allformatvariant.append(cls)
206 return cls
206 return cls
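``registerformatvariant`` is an import-time registry: every decorated subclass is appended to ``allformatvariant``, which ``finddeficiencies`` walks later. The pattern in isolation (names illustrative):

    registry = []

    def register(cls):
        registry.append(cls)
        return cls  # hand the class back unchanged so its name still binds

    @register
    class example(object):
        name = b'example'

    assert registry == [example]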
207
207
208
208
209 class formatvariant(improvement):
209 class formatvariant(improvement):
210 """an improvement subclass dedicated to repository format"""
210 """an improvement subclass dedicated to repository format"""
211
211
212 type = deficiency
212 type = deficiency
213 ### The following attributes should be defined for each class:
213 ### The following attributes should be defined for each class:
214
214
215 # machine-readable string uniquely identifying this improvement. it will be
215 # machine-readable string uniquely identifying this improvement. it will be
216 # mapped to an action later in the upgrade process.
216 # mapped to an action later in the upgrade process.
217 name = None
217 name = None
218
218
219 # message intended for humans explaining the improvement in more detail,
219 # message intended for humans explaining the improvement in more detail,
220 # including the implications of it. For ``deficiency`` types, should be worded
220 # including the implications of it. For ``deficiency`` types, should be worded
221 # in the present tense.
221 # in the present tense.
222 description = None
222 description = None
223
223
224 # message intended for humans explaining what an upgrade addressing this
224 # message intended for humans explaining what an upgrade addressing this
225 # issue will do. should be worded in the future tense.
225 # issue will do. should be worded in the future tense.
226 upgrademessage = None
226 upgrademessage = None
227
227
228 # value of current Mercurial default for new repository
228 # value of current Mercurial default for new repository
229 default = None
229 default = None
230
230
231 def __init__(self):
231 def __init__(self):
232 raise NotImplementedError()
232 raise NotImplementedError()
233
233
234 @staticmethod
234 @staticmethod
235 def fromrepo(repo):
235 def fromrepo(repo):
236 """current value of the variant in the repository"""
236 """current value of the variant in the repository"""
237 raise NotImplementedError()
237 raise NotImplementedError()
238
238
239 @staticmethod
239 @staticmethod
240 def fromconfig(repo):
240 def fromconfig(repo):
241 """current value of the variant in the configuration"""
241 """current value of the variant in the configuration"""
242 raise NotImplementedError()
242 raise NotImplementedError()
243
243
244
244
245 class requirementformatvariant(formatvariant):
245 class requirementformatvariant(formatvariant):
246 """formatvariant based on a 'requirement' name.
246 """formatvariant based on a 'requirement' name.
247
247
248 Many format variants are controlled by a 'requirement'. We define a small
248 Many format variants are controlled by a 'requirement'. We define a small
249 subclass to factor the code.
249 subclass to factor the code.
250 """
250 """
251
251
252 # the requirement that controls this format variant
252 # the requirement that controls this format variant
253 _requirement = None
253 _requirement = None
254
254
255 @staticmethod
255 @staticmethod
256 def _newreporequirements(ui):
256 def _newreporequirements(ui):
257 return localrepo.newreporequirements(
257 return localrepo.newreporequirements(
258 ui, localrepo.defaultcreateopts(ui)
258 ui, localrepo.defaultcreateopts(ui)
259 )
259 )
260
260
261 @classmethod
261 @classmethod
262 def fromrepo(cls, repo):
262 def fromrepo(cls, repo):
263 assert cls._requirement is not None
263 assert cls._requirement is not None
264 return cls._requirement in repo.requirements
264 return cls._requirement in repo.requirements
265
265
266 @classmethod
266 @classmethod
267 def fromconfig(cls, repo):
267 def fromconfig(cls, repo):
268 assert cls._requirement is not None
268 assert cls._requirement is not None
269 return cls._requirement in cls._newreporequirements(repo.ui)
269 return cls._requirement in cls._newreporequirements(repo.ui)
270
270
271
271
272 @registerformatvariant
272 @registerformatvariant
273 class fncache(requirementformatvariant):
273 class fncache(requirementformatvariant):
274 name = b'fncache'
274 name = b'fncache'
275
275
276 _requirement = b'fncache'
276 _requirement = b'fncache'
277
277
278 default = True
278 default = True
279
279
280 description = _(
280 description = _(
281 b'long and reserved filenames may not work correctly; '
281 b'long and reserved filenames may not work correctly; '
282 b'repository performance is sub-optimal'
282 b'repository performance is sub-optimal'
283 )
283 )
284
284
285 upgrademessage = _(
285 upgrademessage = _(
286 b'repository will be more resilient to storing '
286 b'repository will be more resilient to storing '
287 b'certain paths and performance of certain '
287 b'certain paths and performance of certain '
288 b'operations should be improved'
288 b'operations should be improved'
289 )
289 )
290
290
291
291
292 @registerformatvariant
292 @registerformatvariant
293 class dotencode(requirementformatvariant):
293 class dotencode(requirementformatvariant):
294 name = b'dotencode'
294 name = b'dotencode'
295
295
296 _requirement = b'dotencode'
296 _requirement = b'dotencode'
297
297
298 default = True
298 default = True
299
299
300 description = _(
300 description = _(
301 b'storage of filenames beginning with a period or '
301 b'storage of filenames beginning with a period or '
302 b'space may not work correctly'
302 b'space may not work correctly'
303 )
303 )
304
304
305 upgrademessage = _(
305 upgrademessage = _(
306 b'repository will be better able to store files '
306 b'repository will be better able to store files '
307 b'beginning with a space or period'
307 b'beginning with a space or period'
308 )
308 )
309
309
310
310
311 @registerformatvariant
311 @registerformatvariant
312 class generaldelta(requirementformatvariant):
312 class generaldelta(requirementformatvariant):
313 name = b'generaldelta'
313 name = b'generaldelta'
314
314
315 _requirement = b'generaldelta'
315 _requirement = b'generaldelta'
316
316
317 default = True
317 default = True
318
318
319 description = _(
319 description = _(
320 b'deltas within internal storage are unable to '
320 b'deltas within internal storage are unable to '
321 b'choose optimal revisions; repository is larger and '
321 b'choose optimal revisions; repository is larger and '
322 b'slower than it could be; interaction with other '
322 b'slower than it could be; interaction with other '
323 b'repositories may require extra network and CPU '
323 b'repositories may require extra network and CPU '
324 b'resources, making "hg push" and "hg pull" slower'
324 b'resources, making "hg push" and "hg pull" slower'
325 )
325 )
326
326
327 upgrademessage = _(
327 upgrademessage = _(
328 b'repository storage will be able to create '
328 b'repository storage will be able to create '
329 b'optimal deltas; new repository data will be '
329 b'optimal deltas; new repository data will be '
330 b'smaller and read times should decrease; '
330 b'smaller and read times should decrease; '
331 b'interacting with other repositories using this '
331 b'interacting with other repositories using this '
332 b'storage model should require less network and '
332 b'storage model should require less network and '
333 b'CPU resources, making "hg push" and "hg pull" '
333 b'CPU resources, making "hg push" and "hg pull" '
334 b'faster'
334 b'faster'
335 )
335 )
336
336
337
337
338 @registerformatvariant
338 @registerformatvariant
339 class sparserevlog(requirementformatvariant):
339 class sparserevlog(requirementformatvariant):
340 name = b'sparserevlog'
340 name = b'sparserevlog'
341
341
342 _requirement = localrepo.SPARSEREVLOG_REQUIREMENT
342 _requirement = requirements.SPARSEREVLOG_REQUIREMENT
343
343
344 default = True
344 default = True
345
345
346 description = _(
346 description = _(
347 b'in order to limit disk reading and memory usage on older '
347 b'in order to limit disk reading and memory usage on older '
348 b'versions, the span of a delta chain from its root to its '
348 b'versions, the span of a delta chain from its root to its '
349 b'end is limited, whatever the relevant data in this span. '
349 b'end is limited, whatever the relevant data in this span. '
350 b"This can severely limit Mercurial's ability to build good "
350 b"This can severely limit Mercurial's ability to build good "
351 b'chains of deltas, resulting in much more storage space being '
351 b'chains of deltas, resulting in much more storage space being '
352 b'taken and limiting reusability of on-disk deltas during '
352 b'taken and limiting reusability of on-disk deltas during '
353 b'exchange.'
353 b'exchange.'
354 )
354 )
355
355
356 upgrademessage = _(
356 upgrademessage = _(
357 b'Revlogs will support delta chains with more unused data '
357 b'Revlogs will support delta chains with more unused data '
358 b'between payloads. These gaps will be skipped at read '
358 b'between payloads. These gaps will be skipped at read '
359 b'time. This allows for better delta chains, yielding '
359 b'time. This allows for better delta chains, yielding '
360 b'better compression and faster exchange with the server.'
360 b'better compression and faster exchange with the server.'
361 )
361 )
362
362
363
363
364 @registerformatvariant
364 @registerformatvariant
365 class sidedata(requirementformatvariant):
365 class sidedata(requirementformatvariant):
366 name = b'sidedata'
366 name = b'sidedata'
367
367
368 _requirement = localrepo.SIDEDATA_REQUIREMENT
368 _requirement = requirements.SIDEDATA_REQUIREMENT
369
369
370 default = False
370 default = False
371
371
372 description = _(
372 description = _(
373 b'Allows storage of extra data alongside a revision, '
373 b'Allows storage of extra data alongside a revision, '
374 b'unlocking various caching options.'
374 b'unlocking various caching options.'
375 )
375 )
376
376
377 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
377 upgrademessage = _(b'Allows storage of extra data alongside a revision.')
378
378
379
379
380 @registerformatvariant
380 @registerformatvariant
381 class persistentnodemap(requirementformatvariant):
381 class persistentnodemap(requirementformatvariant):
382 name = b'persistent-nodemap'
382 name = b'persistent-nodemap'
383
383
384 _requirement = localrepo.NODEMAP_REQUIREMENT
384 _requirement = requirements.NODEMAP_REQUIREMENT
385
385
386 default = False
386 default = False
387
387
388 description = _(
388 description = _(
389 b'persist the node -> rev mapping on disk to speed up lookups'
389 b'persist the node -> rev mapping on disk to speed up lookups'
390 )
390 )
391
391
392 upgrademessage = _(b'Speed up revision lookup by node id.')
392 upgrademessage = _(b'Speed up revision lookup by node id.')
393
393
394
394
395 @registerformatvariant
395 @registerformatvariant
396 class copiessdc(requirementformatvariant):
396 class copiessdc(requirementformatvariant):
397 name = b'copies-sdc'
397 name = b'copies-sdc'
398
398
399 _requirement = localrepo.COPIESSDC_REQUIREMENT
399 _requirement = requirements.COPIESSDC_REQUIREMENT
400
400
401 default = False
401 default = False
402
402
403 description = _(b'Stores copies information alongside changesets.')
403 description = _(b'Stores copies information alongside changesets.')
404
404
405 upgrademessage = _(
405 upgrademessage = _(
406 b'Allows using a more efficient algorithm to deal with copy tracing.'
406 b'Allows using a more efficient algorithm to deal with copy tracing.'
407 )
407 )
408
408
409
409
410 @registerformatvariant
410 @registerformatvariant
411 class removecldeltachain(formatvariant):
411 class removecldeltachain(formatvariant):
412 name = b'plain-cl-delta'
412 name = b'plain-cl-delta'
413
413
414 default = True
414 default = True
415
415
416 description = _(
416 description = _(
417 b'changelog storage is using deltas instead of '
417 b'changelog storage is using deltas instead of '
418 b'raw entries; changelog reading and any '
418 b'raw entries; changelog reading and any '
419 b'operation relying on changelog data are slower '
419 b'operation relying on changelog data are slower '
420 b'than they could be'
420 b'than they could be'
421 )
421 )
422
422
423 upgrademessage = _(
423 upgrademessage = _(
424 b'changelog storage will be reformatted to '
424 b'changelog storage will be reformatted to '
425 b'store raw entries; changelog reading will be '
425 b'store raw entries; changelog reading will be '
426 b'faster; changelog size may be reduced'
426 b'faster; changelog size may be reduced'
427 )
427 )
428
428
429 @staticmethod
429 @staticmethod
430 def fromrepo(repo):
430 def fromrepo(repo):
431 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
431 # Mercurial 4.0 changed changelogs to not use delta chains. Search for
432 # changelogs with deltas.
432 # changelogs with deltas.
433 cl = repo.changelog
433 cl = repo.changelog
434 chainbase = cl.chainbase
434 chainbase = cl.chainbase
435 return all(rev == chainbase(rev) for rev in cl)
435 return all(rev == chainbase(rev) for rev in cl)
436
436
437 @staticmethod
437 @staticmethod
438 def fromconfig(repo):
438 def fromconfig(repo):
439 return True
439 return True
440
440
441
441
442 @registerformatvariant
442 @registerformatvariant
443 class compressionengine(formatvariant):
443 class compressionengine(formatvariant):
444 name = b'compression'
444 name = b'compression'
445 default = b'zlib'
445 default = b'zlib'
446
446
447 description = _(
447 description = _(
448 b'Compression algorithm used to compress data. '
448 b'Compression algorithm used to compress data. '
449 b'Some engines are faster than others'
449 b'Some engines are faster than others'
450 )
450 )
451
451
452 upgrademessage = _(
452 upgrademessage = _(
453 b'revlog content will be recompressed with the new algorithm.'
453 b'revlog content will be recompressed with the new algorithm.'
454 )
454 )
455
455
456 @classmethod
456 @classmethod
457 def fromrepo(cls, repo):
457 def fromrepo(cls, repo):
458 # we allow multiple compression engine requirements to co-exist because
458 # we allow multiple compression engine requirements to co-exist because
459 # strictly speaking, revlog seems to support mixed compression styles.
459 # strictly speaking, revlog seems to support mixed compression styles.
460 #
460 #
461 # The compression used for new entries will be "the last one"
461 # The compression used for new entries will be "the last one"
462 compression = b'zlib'
462 compression = b'zlib'
463 for req in repo.requirements:
463 for req in repo.requirements:
464 prefix = req.startswith
464 prefix = req.startswith
465 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
465 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
466 compression = req.split(b'-', 2)[2]
466 compression = req.split(b'-', 2)[2]
467 return compression
467 return compression
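``split(b'-', 2)[2]`` keeps everything after the second dash, which is how a requirement name is reduced to an engine name; two worked examples:

    assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
    assert b'exp-compression-zlib'.split(b'-', 2)[2] == b'zlib'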
468
468
469 @classmethod
469 @classmethod
470 def fromconfig(cls, repo):
470 def fromconfig(cls, repo):
471 compengines = repo.ui.configlist(b'format', b'revlog-compression')
471 compengines = repo.ui.configlist(b'format', b'revlog-compression')
472 # return the first valid value as the selection code would do
472 # return the first valid value as the selection code would do
473 for comp in compengines:
473 for comp in compengines:
474 if comp in util.compengines:
474 if comp in util.compengines:
475 return comp
475 return comp
476
476
477 # no valid compression found; let's display them all for clarity
477 # no valid compression found; let's display them all for clarity
478 return b','.join(compengines)
478 return b','.join(compengines)
479
479
480
480
481 @registerformatvariant
481 @registerformatvariant
482 class compressionlevel(formatvariant):
482 class compressionlevel(formatvariant):
483 name = b'compression-level'
483 name = b'compression-level'
484 default = b'default'
484 default = b'default'
485
485
486 description = _(b'compression level')
486 description = _(b'compression level')
487
487
488 upgrademessage = _(b'revlog content will be recompressed')
488 upgrademessage = _(b'revlog content will be recompressed')
489
489
490 @classmethod
490 @classmethod
491 def fromrepo(cls, repo):
491 def fromrepo(cls, repo):
492 comp = compressionengine.fromrepo(repo)
492 comp = compressionengine.fromrepo(repo)
493 level = None
493 level = None
494 if comp == b'zlib':
494 if comp == b'zlib':
495 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
495 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
496 elif comp == b'zstd':
496 elif comp == b'zstd':
497 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
497 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
498 if level is None:
498 if level is None:
499 return b'default'
499 return b'default'
500 return bytes(level)
500 return bytes(level)
501
501
502 @classmethod
502 @classmethod
503 def fromconfig(cls, repo):
503 def fromconfig(cls, repo):
504 comp = compressionengine.fromconfig(repo)
504 comp = compressionengine.fromconfig(repo)
505 level = None
505 level = None
506 if comp == b'zlib':
506 if comp == b'zlib':
507 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
507 level = repo.ui.configint(b'storage', b'revlog.zlib.level')
508 elif comp == b'zstd':
508 elif comp == b'zstd':
509 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
509 level = repo.ui.configint(b'storage', b'revlog.zstd.level')
510 if level is None:
510 if level is None:
511 return b'default'
511 return b'default'
512 return bytes(level)
512 return bytes(level)
513
513
514
514
515 def finddeficiencies(repo):
515 def finddeficiencies(repo):
516 """returns a list of deficiencies that the repo suffer from"""
516 """returns a list of deficiencies that the repo suffer from"""
517 deficiencies = []
517 deficiencies = []
518
518
519 # We could detect lack of revlogv1 and store here, but they were added
519 # We could detect lack of revlogv1 and store here, but they were added
520 # in 0.9.2 and we don't support upgrading repos without these
520 # in 0.9.2 and we don't support upgrading repos without these
521 # requirements, so let's not bother.
521 # requirements, so let's not bother.
522
522
523 for fv in allformatvariant:
523 for fv in allformatvariant:
524 if not fv.fromrepo(repo):
524 if not fv.fromrepo(repo):
525 deficiencies.append(fv)
525 deficiencies.append(fv)
526
526
527 return deficiencies
527 return deficiencies
528
528
529
529
530 # search without '-' to support the older form on newer clients.
530 # search without '-' to support the older form on newer clients.
531 #
531 #
532 # We don't enforce backward compatibility for debug commands, so this
532 # We don't enforce backward compatibility for debug commands, so this
533 # might eventually be dropped. However, having to use two different
533 # might eventually be dropped. However, having to use two different
534 # forms in scripts when comparing results is annoying enough to add
534 # forms in scripts when comparing results is annoying enough to add
535 # backward compatibility for a while.
535 # backward compatibility for a while.
536 legacy_opts_map = {
536 legacy_opts_map = {
537 b'redeltaparent': b're-delta-parent',
537 b'redeltaparent': b're-delta-parent',
538 b'redeltamultibase': b're-delta-multibase',
538 b'redeltamultibase': b're-delta-multibase',
539 b'redeltaall': b're-delta-all',
539 b'redeltaall': b're-delta-all',
540 b'redeltafulladd': b're-delta-fulladd',
540 b'redeltafulladd': b're-delta-fulladd',
541 }
541 }
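A hedged sketch of how such a map is typically applied to user-supplied names before matching (the helper name is illustrative; the real consumer sits outside this hunk):

    def _normalize_opt(name):
        return legacy_opts_map.get(name, name)

    assert _normalize_opt(b'redeltaall') == b're-delta-all'
    assert _normalize_opt(b're-delta-all') == b're-delta-all'  # already canonical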
542
542
543
543
544 def findoptimizations(repo):
544 def findoptimizations(repo):
545 """Determine optimisation that could be used during upgrade"""
545 """Determine optimisation that could be used during upgrade"""
546 # These are unconditionally added. There is logic later that figures out
546 # These are unconditionally added. There is logic later that figures out
547 # which ones to apply.
547 # which ones to apply.
548 optimizations = []
548 optimizations = []
549
549
550 optimizations.append(
550 optimizations.append(
551 improvement(
551 improvement(
552 name=b're-delta-parent',
552 name=b're-delta-parent',
553 type=optimisation,
553 type=optimisation,
554 description=_(
554 description=_(
555 b'deltas within internal storage will be recalculated to '
555 b'deltas within internal storage will be recalculated to '
556 b'choose an optimal base revision where this was not '
556 b'choose an optimal base revision where this was not '
557 b'already done; the size of the repository may shrink and '
557 b'already done; the size of the repository may shrink and '
558 b'various operations may become faster; the first time '
558 b'various operations may become faster; the first time '
559 b'this optimization is performed could slow down upgrade '
559 b'this optimization is performed could slow down upgrade '
560 b'execution considerably; subsequent invocations should '
560 b'execution considerably; subsequent invocations should '
561 b'not run noticeably slower'
561 b'not run noticeably slower'
562 ),
562 ),
563 upgrademessage=_(
563 upgrademessage=_(
564 b'deltas within internal storage will choose a new '
564 b'deltas within internal storage will choose a new '
565 b'base revision if needed'
565 b'base revision if needed'
566 ),
566 ),
567 )
567 )
568 )
568 )
569
569
570 optimizations.append(
570 optimizations.append(
571 improvement(
571 improvement(
572 name=b're-delta-multibase',
572 name=b're-delta-multibase',
573 type=optimisation,
573 type=optimisation,
574 description=_(
574 description=_(
575 b'deltas within internal storage will be recalculated '
575 b'deltas within internal storage will be recalculated '
576 b'against multiple base revisions and the smallest '
576 b'against multiple base revisions and the smallest '
577 b'difference will be used; the size of the repository may '
577 b'difference will be used; the size of the repository may '
578 b'shrink significantly when there are many merges; this '
578 b'shrink significantly when there are many merges; this '
579 b'optimization will slow down execution in proportion to '
579 b'optimization will slow down execution in proportion to '
580 b'the number of merges in the repository and the amount '
580 b'the number of merges in the repository and the amount '
581 b'of files in the repository; this slowdown should not '
581 b'of files in the repository; this slowdown should not '
582 b'be significant unless there are tens of thousands of '
582 b'be significant unless there are tens of thousands of '
583 b'files and thousands of merges'
583 b'files and thousands of merges'
584 ),
584 ),
585 upgrademessage=_(
585 upgrademessage=_(
586 b'deltas within internal storage will choose an '
586 b'deltas within internal storage will choose an '
587 b'optimal delta by computing deltas against multiple '
587 b'optimal delta by computing deltas against multiple '
588 b'parents; may slow down execution time '
588 b'parents; may slow down execution time '
589 b'significantly'
589 b'significantly'
590 ),
590 ),
591 )
591 )
592 )
592 )
593
593
594 optimizations.append(
594 optimizations.append(
595 improvement(
595 improvement(
596 name=b're-delta-all',
596 name=b're-delta-all',
597 type=optimisation,
597 type=optimisation,
598 description=_(
598 description=_(
599 b'deltas within internal storage will always be '
599 b'deltas within internal storage will always be '
600 b'recalculated without reusing prior deltas; this will '
600 b'recalculated without reusing prior deltas; this will '
601 b'likely make execution run several times slower; this '
601 b'likely make execution run several times slower; this '
602 b'optimization is typically not needed'
602 b'optimization is typically not needed'
603 ),
603 ),
604 upgrademessage=_(
604 upgrademessage=_(
605 b'deltas within internal storage will be fully '
605 b'deltas within internal storage will be fully '
606 b'recomputed; this will likely drastically slow down '
606 b'recomputed; this will likely drastically slow down '
607 b'execution time'
607 b'execution time'
608 ),
608 ),
609 )
609 )
610 )
610 )
611
611
612 optimizations.append(
612 optimizations.append(
613 improvement(
613 improvement(
614 name=b're-delta-fulladd',
614 name=b're-delta-fulladd',
615 type=optimisation,
615 type=optimisation,
616 description=_(
616 description=_(
617 b'every revision will be re-added as if it was new '
617 b'every revision will be re-added as if it was new '
618 b'content. It will go through the full storage '
618 b'content. It will go through the full storage '
619 b'mechanism giving extensions a chance to process it '
619 b'mechanism giving extensions a chance to process it '
620 b'(eg. lfs). This is similar to "re-delta-all" but even '
620 b'(eg. lfs). This is similar to "re-delta-all" but even '
621 b'slower since more logic is involved.'
621 b'slower since more logic is involved.'
622 ),
622 ),
623 upgrademessage=_(
623 upgrademessage=_(
624 b'each revision will be added as new content to the '
624 b'each revision will be added as new content to the '
625 b'internal storage; this will likely drastically slow '
625 b'internal storage; this will likely drastically slow '
626 b'down execution time, but some extensions might need '
626 b'down execution time, but some extensions might need '
627 b'it'
627 b'it'
628 ),
628 ),
629 )
629 )
630 )
630 )
631
631
632 return optimizations
632 return optimizations
633
633
634
634
635 def determineactions(repo, deficiencies, sourcereqs, destreqs):
635 def determineactions(repo, deficiencies, sourcereqs, destreqs):
636 """Determine upgrade actions that will be performed.
636 """Determine upgrade actions that will be performed.
637
637
638 Given a list of improvements as returned by ``finddeficiencies`` and
638 Given a list of improvements as returned by ``finddeficiencies`` and
639 ``findoptimizations``, determine the list of upgrade actions that
639 ``findoptimizations``, determine the list of upgrade actions that
640 will be performed.
640 will be performed.
641
641
642 The role of this function is to filter improvements if needed, apply
642 The role of this function is to filter improvements if needed, apply
643 recommended optimizations from the improvements list that make sense,
643 recommended optimizations from the improvements list that make sense,
644 etc.
644 etc.
645
645
646 Returns a list of action names.
646 Returns a list of action names.
647 """
647 """
648 newactions = []
648 newactions = []
649
649
650 for d in deficiencies:
650 for d in deficiencies:
651 name = d._requirement
651 name = d._requirement
652
652
653 # If the action is a requirement that doesn't show up in the
653 # If the action is a requirement that doesn't show up in the
654 # destination requirements, prune the action.
654 # destination requirements, prune the action.
655 if name is not None and name not in destreqs:
655 if name is not None and name not in destreqs:
656 continue
656 continue
657
657
658 newactions.append(d)
658 newactions.append(d)
659
659
660 # FUTURE consider adding some optimizations here for certain transitions.
660 # FUTURE consider adding some optimizations here for certain transitions.
661 # e.g. adding generaldelta could schedule parent redeltas.
661 # e.g. adding generaldelta could schedule parent redeltas.
662
662
663 return newactions
663 return newactions
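A worked example of the pruning rule (values illustrative): a deficiency tied to a requirement the destination will not carry is silently dropped.

    name = b'sparserevlog'                              # d._requirement
    destreqs = {b'revlogv1', b'store', b'generaldelta'}
    assert name not in destreqs                         # -> action is pruned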
664
664
665
665
666 def _revlogfrompath(repo, path):
666 def _revlogfrompath(repo, path):
667 """Obtain a revlog from a repo path.
667 """Obtain a revlog from a repo path.
668
668
669 An instance of the appropriate class is returned.
669 An instance of the appropriate class is returned.
670 """
670 """
671 if path == b'00changelog.i':
671 if path == b'00changelog.i':
672 return changelog.changelog(repo.svfs)
672 return changelog.changelog(repo.svfs)
673 elif path.endswith(b'00manifest.i'):
673 elif path.endswith(b'00manifest.i'):
674 mandir = path[: -len(b'00manifest.i')]
674 mandir = path[: -len(b'00manifest.i')]
675 return manifest.manifestrevlog(repo.svfs, tree=mandir)
675 return manifest.manifestrevlog(repo.svfs, tree=mandir)
676 else:
676 else:
677 # reverse of "/".join(("data", path + ".i"))
677 # reverse of "/".join(("data", path + ".i"))
678 return filelog.filelog(repo.svfs, path[5:-2])
678 return filelog.filelog(repo.svfs, path[5:-2])
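The filelog branch reverses the store encoding noted in the comment; a worked example of the slicing (store-relative path assumed):

    path = b'data/src/main.c.i'
    assert path[5:-2] == b'src/main.c'  # drop b'data/' prefix and b'.i' suffix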
679
679
680
680
681 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
681 def _copyrevlog(tr, destrepo, oldrl, unencodedname):
682 """copy all relevant files for `oldrl` into `destrepo` store
682 """copy all relevant files for `oldrl` into `destrepo` store
683
683
684 Files are copied "as is" without any transformation. The copy is performed
684 Files are copied "as is" without any transformation. The copy is performed
685 without extra checks. Callers are responsible for making sure the copied
685 without extra checks. Callers are responsible for making sure the copied
686 content is compatible with the format of the destination repository.
686 content is compatible with the format of the destination repository.
687 """
687 """
688 oldrl = getattr(oldrl, '_revlog', oldrl)
688 oldrl = getattr(oldrl, '_revlog', oldrl)
689 newrl = _revlogfrompath(destrepo, unencodedname)
689 newrl = _revlogfrompath(destrepo, unencodedname)
690 newrl = getattr(newrl, '_revlog', newrl)
690 newrl = getattr(newrl, '_revlog', newrl)
691
691
692 oldvfs = oldrl.opener
692 oldvfs = oldrl.opener
693 newvfs = newrl.opener
693 newvfs = newrl.opener
694 oldindex = oldvfs.join(oldrl.indexfile)
694 oldindex = oldvfs.join(oldrl.indexfile)
695 newindex = newvfs.join(newrl.indexfile)
695 newindex = newvfs.join(newrl.indexfile)
696 olddata = oldvfs.join(oldrl.datafile)
696 olddata = oldvfs.join(oldrl.datafile)
697 newdata = newvfs.join(newrl.datafile)
697 newdata = newvfs.join(newrl.datafile)
698
698
699 with newvfs(newrl.indexfile, b'w'):
699 with newvfs(newrl.indexfile, b'w'):
700 pass # create all the directories
700 pass # create all the directories
701
701
702 util.copyfile(oldindex, newindex)
702 util.copyfile(oldindex, newindex)
703 copydata = oldrl.opener.exists(oldrl.datafile)
703 copydata = oldrl.opener.exists(oldrl.datafile)
704 if copydata:
704 if copydata:
705 util.copyfile(olddata, newdata)
705 util.copyfile(olddata, newdata)
706
706
707 if not (
707 if not (
708 unencodedname.endswith(b'00changelog.i')
708 unencodedname.endswith(b'00changelog.i')
709 or unencodedname.endswith(b'00manifest.i')
709 or unencodedname.endswith(b'00manifest.i')
710 ):
710 ):
711 destrepo.svfs.fncache.add(unencodedname)
711 destrepo.svfs.fncache.add(unencodedname)
712 if copydata:
712 if copydata:
713 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
713 destrepo.svfs.fncache.add(unencodedname[:-2] + b'.d')
714
714
715
715
716 UPGRADE_CHANGELOG = object()
716 UPGRADE_CHANGELOG = object()
717 UPGRADE_MANIFEST = object()
717 UPGRADE_MANIFEST = object()
718 UPGRADE_FILELOG = object()
718 UPGRADE_FILELOG = object()
719
719
720 UPGRADE_ALL_REVLOGS = frozenset(
720 UPGRADE_ALL_REVLOGS = frozenset(
721 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
721 [UPGRADE_CHANGELOG, UPGRADE_MANIFEST, UPGRADE_FILELOG]
722 )
722 )
723
723
724
724
725 def getsidedatacompanion(srcrepo, dstrepo):
725 def getsidedatacompanion(srcrepo, dstrepo):
726 sidedatacompanion = None
726 sidedatacompanion = None
727 removedreqs = srcrepo.requirements - dstrepo.requirements
727 removedreqs = srcrepo.requirements - dstrepo.requirements
728 addedreqs = dstrepo.requirements - srcrepo.requirements
728 addedreqs = dstrepo.requirements - srcrepo.requirements
729 if localrepo.SIDEDATA_REQUIREMENT in removedreqs:
729 if requirements.SIDEDATA_REQUIREMENT in removedreqs:
730
730
731 def sidedatacompanion(rl, rev):
731 def sidedatacompanion(rl, rev):
732 rl = getattr(rl, '_revlog', rl)
732 rl = getattr(rl, '_revlog', rl)
733 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
733 if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
734 return True, (), {}
734 return True, (), {}
735 return False, (), {}
735 return False, (), {}
736
736
737 elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
737 elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
738 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
738 sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
739 elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
739 elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
740 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
740 sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
741 return sidedatacompanion
741 return sidedatacompanion
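Judging from the branches above, a companion is a per-revision callback returning a 3-tuple; a no-op sketch matching that visible shape (the precise meaning of the last two slots is not shown in this hunk):

    def noop_sidedatacompanion(rl, rev):
        # (rewrite this revision's sidedata?, <second slot>, <third slot>)
        return False, (), {}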
742
742
743
743
744 def matchrevlog(revlogfilter, entry):
744 def matchrevlog(revlogfilter, entry):
745 """check is a revlog is selected for cloning
745 """check is a revlog is selected for cloning
746
746
747 The store entry is checked against the passed filter"""
747 The store entry is checked against the passed filter"""
748 if entry.endswith(b'00changelog.i'):
748 if entry.endswith(b'00changelog.i'):
749 return UPGRADE_CHANGELOG in revlogfilter
749 return UPGRADE_CHANGELOG in revlogfilter
750 elif entry.endswith(b'00manifest.i'):
750 elif entry.endswith(b'00manifest.i'):
751 return UPGRADE_MANIFEST in revlogfilter
751 return UPGRADE_MANIFEST in revlogfilter
752 return UPGRADE_FILELOG in revlogfilter
752 return UPGRADE_FILELOG in revlogfilter
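Usage sketch with the filter tokens defined above:

    assert matchrevlog(UPGRADE_ALL_REVLOGS, b'00changelog.i')
    assert matchrevlog(UPGRADE_ALL_REVLOGS, b'data/src/main.c.i')
    # a changelog-only filter deselects filelogs:
    assert not matchrevlog(frozenset([UPGRADE_CHANGELOG]), b'data/src/main.c.i')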
753
753
754
754
755 def _clonerevlogs(
755 def _clonerevlogs(
756 ui,
756 ui,
757 srcrepo,
757 srcrepo,
758 dstrepo,
758 dstrepo,
759 tr,
759 tr,
760 deltareuse,
760 deltareuse,
761 forcedeltabothparents,
761 forcedeltabothparents,
762 revlogs=UPGRADE_ALL_REVLOGS,
762 revlogs=UPGRADE_ALL_REVLOGS,
763 ):
763 ):
764 """Copy revlogs between 2 repos."""
764 """Copy revlogs between 2 repos."""
765 revcount = 0
765 revcount = 0
766 srcsize = 0
766 srcsize = 0
767 srcrawsize = 0
767 srcrawsize = 0
768 dstsize = 0
768 dstsize = 0
769 fcount = 0
769 fcount = 0
770 frevcount = 0
770 frevcount = 0
771 fsrcsize = 0
771 fsrcsize = 0
772 frawsize = 0
772 frawsize = 0
773 fdstsize = 0
773 fdstsize = 0
774 mcount = 0
774 mcount = 0
775 mrevcount = 0
775 mrevcount = 0
776 msrcsize = 0
776 msrcsize = 0
777 mrawsize = 0
777 mrawsize = 0
778 mdstsize = 0
778 mdstsize = 0
779 crevcount = 0
779 crevcount = 0
780 csrcsize = 0
780 csrcsize = 0
781 crawsize = 0
781 crawsize = 0
782 cdstsize = 0
782 cdstsize = 0
783
783
784 alldatafiles = list(srcrepo.store.walk())
784 alldatafiles = list(srcrepo.store.walk())
785
785
786 # Perform a pass to collect metadata. This validates we can open all
786 # Perform a pass to collect metadata. This validates we can open all
787 # source files and allows a unified progress bar to be displayed.
787 # source files and allows a unified progress bar to be displayed.
788 for unencoded, encoded, size in alldatafiles:
788 for unencoded, encoded, size in alldatafiles:
789 if unencoded.endswith(b'.d'):
789 if unencoded.endswith(b'.d'):
790 continue
790 continue
791
791
792 rl = _revlogfrompath(srcrepo, unencoded)
792 rl = _revlogfrompath(srcrepo, unencoded)
793
793
794 info = rl.storageinfo(
794 info = rl.storageinfo(
795 exclusivefiles=True,
795 exclusivefiles=True,
796 revisionscount=True,
796 revisionscount=True,
797 trackedsize=True,
797 trackedsize=True,
798 storedsize=True,
798 storedsize=True,
799 )
799 )
800
800
801 revcount += info[b'revisionscount'] or 0
801 revcount += info[b'revisionscount'] or 0
802 datasize = info[b'storedsize'] or 0
802 datasize = info[b'storedsize'] or 0
803 rawsize = info[b'trackedsize'] or 0
803 rawsize = info[b'trackedsize'] or 0
804
804
805 srcsize += datasize
805 srcsize += datasize
806 srcrawsize += rawsize
806 srcrawsize += rawsize
807
807
808 # This is for the separate progress bars.
808 # This is for the separate progress bars.
809 if isinstance(rl, changelog.changelog):
809 if isinstance(rl, changelog.changelog):
810 crevcount += len(rl)
810 crevcount += len(rl)
811 csrcsize += datasize
811 csrcsize += datasize
812 crawsize += rawsize
812 crawsize += rawsize
813 elif isinstance(rl, manifest.manifestrevlog):
813 elif isinstance(rl, manifest.manifestrevlog):
814 mcount += 1
814 mcount += 1
815 mrevcount += len(rl)
815 mrevcount += len(rl)
816 msrcsize += datasize
816 msrcsize += datasize
817 mrawsize += rawsize
817 mrawsize += rawsize
818 elif isinstance(rl, filelog.filelog):
818 elif isinstance(rl, filelog.filelog):
819 fcount += 1
819 fcount += 1
820 frevcount += len(rl)
820 frevcount += len(rl)
821 fsrcsize += datasize
821 fsrcsize += datasize
822 frawsize += rawsize
822 frawsize += rawsize
823 else:
823 else:
824 raise error.ProgrammingError(b'unknown revlog type')
824 raise error.ProgrammingError(b'unknown revlog type')
825
825
826 if not revcount:
826 if not revcount:
827 return
827 return
828
828
829 ui.status(
829 ui.status(
830 _(
830 _(
831 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
831 b'migrating %d total revisions (%d in filelogs, %d in manifests, '
832 b'%d in changelog)\n'
832 b'%d in changelog)\n'
833 )
833 )
834 % (revcount, frevcount, mrevcount, crevcount)
834 % (revcount, frevcount, mrevcount, crevcount)
835 )
835 )
836 ui.status(
836 ui.status(
837 _(b'migrating %s in store; %s tracked data\n')
837 _(b'migrating %s in store; %s tracked data\n')
838 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
838 % ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
839 )
839 )
840
840
841 # Used to keep track of progress.
841 # Used to keep track of progress.
842 progress = None
842 progress = None
843
843
844 def oncopiedrevision(rl, rev, node):
844 def oncopiedrevision(rl, rev, node):
845 progress.increment()
845 progress.increment()
846
846
847 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
847 sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
848
848
849 # Do the actual copying.
849 # Do the actual copying.
850 # FUTURE this operation can be farmed off to worker processes.
850 # FUTURE this operation can be farmed off to worker processes.
851 seen = set()
851 seen = set()
852 for unencoded, encoded, size in alldatafiles:
852 for unencoded, encoded, size in alldatafiles:
853 if unencoded.endswith(b'.d'):
853 if unencoded.endswith(b'.d'):
854 continue
854 continue
855
855
856 oldrl = _revlogfrompath(srcrepo, unencoded)
856 oldrl = _revlogfrompath(srcrepo, unencoded)
857
857
858 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
858 if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
859 ui.status(
859 ui.status(
860 _(
860 _(
861 b'finished migrating %d manifest revisions across %d '
861 b'finished migrating %d manifest revisions across %d '
862 b'manifests; change in size: %s\n'
862 b'manifests; change in size: %s\n'
863 )
863 )
864 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
864 % (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
865 )
865 )
866
866
867 ui.status(
867 ui.status(
868 _(
868 _(
869 b'migrating changelog containing %d revisions '
869 b'migrating changelog containing %d revisions '
870 b'(%s in store; %s tracked data)\n'
870 b'(%s in store; %s tracked data)\n'
871 )
871 )
872 % (
872 % (
873 crevcount,
873 crevcount,
874 util.bytecount(csrcsize),
874 util.bytecount(csrcsize),
875 util.bytecount(crawsize),
875 util.bytecount(crawsize),
876 )
876 )
877 )
877 )
878 seen.add(b'c')
878 seen.add(b'c')
879 progress = srcrepo.ui.makeprogress(
879 progress = srcrepo.ui.makeprogress(
880 _(b'changelog revisions'), total=crevcount
880 _(b'changelog revisions'), total=crevcount
881 )
881 )
882 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
882 elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
883 ui.status(
883 ui.status(
884 _(
884 _(
885 b'finished migrating %d filelog revisions across %d '
885 b'finished migrating %d filelog revisions across %d '
886 b'filelogs; change in size: %s\n'
886 b'filelogs; change in size: %s\n'
887 )
887 )
888 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
888 % (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
889 )
889 )
890
890
891 ui.status(
891 ui.status(
892 _(
892 _(
893 b'migrating %d manifests containing %d revisions '
893 b'migrating %d manifests containing %d revisions '
894 b'(%s in store; %s tracked data)\n'
894 b'(%s in store; %s tracked data)\n'
895 )
895 )
896 % (
896 % (
897 mcount,
897 mcount,
898 mrevcount,
898 mrevcount,
899 util.bytecount(msrcsize),
899 util.bytecount(msrcsize),
900 util.bytecount(mrawsize),
900 util.bytecount(mrawsize),
901 )
901 )
902 )
902 )
903 seen.add(b'm')
903 seen.add(b'm')
904 if progress:
904 if progress:
905 progress.complete()
905 progress.complete()
906 progress = srcrepo.ui.makeprogress(
906 progress = srcrepo.ui.makeprogress(
907 _(b'manifest revisions'), total=mrevcount
907 _(b'manifest revisions'), total=mrevcount
908 )
908 )
909 elif b'f' not in seen:
909 elif b'f' not in seen:
910 ui.status(
910 ui.status(
911 _(
911 _(
912 b'migrating %d filelogs containing %d revisions '
912 b'migrating %d filelogs containing %d revisions '
913 b'(%s in store; %s tracked data)\n'
913 b'(%s in store; %s tracked data)\n'
914 )
914 )
915 % (
915 % (
916 fcount,
916 fcount,
917 frevcount,
917 frevcount,
918 util.bytecount(fsrcsize),
918 util.bytecount(fsrcsize),
919 util.bytecount(frawsize),
919 util.bytecount(frawsize),
920 )
920 )
921 )
921 )
922 seen.add(b'f')
922 seen.add(b'f')
923 if progress:
923 if progress:
924 progress.complete()
924 progress.complete()
925 progress = srcrepo.ui.makeprogress(
925 progress = srcrepo.ui.makeprogress(
926 _(b'file revisions'), total=frevcount
926 _(b'file revisions'), total=frevcount
927 )
927 )
928
928
929 if matchrevlog(revlogs, unencoded):
929 if matchrevlog(revlogs, unencoded):
930 ui.note(
930 ui.note(
931 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
931 _(b'cloning %d revisions from %s\n') % (len(oldrl), unencoded)
932 )
932 )
933 newrl = _revlogfrompath(dstrepo, unencoded)
933 newrl = _revlogfrompath(dstrepo, unencoded)
934 oldrl.clone(
934 oldrl.clone(
935 tr,
935 tr,
936 newrl,
936 newrl,
937 addrevisioncb=oncopiedrevision,
937 addrevisioncb=oncopiedrevision,
938 deltareuse=deltareuse,
938 deltareuse=deltareuse,
939 forcedeltabothparents=forcedeltabothparents,
939 forcedeltabothparents=forcedeltabothparents,
940 sidedatacompanion=sidedatacompanion,
940 sidedatacompanion=sidedatacompanion,
941 )
941 )
942 else:
942 else:
943 msg = _(b'blindly copying %s containing %i revisions\n')
943 msg = _(b'blindly copying %s containing %i revisions\n')
944 ui.note(msg % (unencoded, len(oldrl)))
944 ui.note(msg % (unencoded, len(oldrl)))
945 _copyrevlog(tr, dstrepo, oldrl, unencoded)
945 _copyrevlog(tr, dstrepo, oldrl, unencoded)
946
946
947 newrl = _revlogfrompath(dstrepo, unencoded)
947 newrl = _revlogfrompath(dstrepo, unencoded)
948
948
949 info = newrl.storageinfo(storedsize=True)
949 info = newrl.storageinfo(storedsize=True)
950 datasize = info[b'storedsize'] or 0
950 datasize = info[b'storedsize'] or 0
951
951
952 dstsize += datasize
952 dstsize += datasize
953
953
954 if isinstance(newrl, changelog.changelog):
954 if isinstance(newrl, changelog.changelog):
955 cdstsize += datasize
955 cdstsize += datasize
956 elif isinstance(newrl, manifest.manifestrevlog):
956 elif isinstance(newrl, manifest.manifestrevlog):
957 mdstsize += datasize
957 mdstsize += datasize
958 else:
958 else:
959 fdstsize += datasize
959 fdstsize += datasize
960
960
961 progress.complete()
961 progress.complete()
962
962
963 ui.status(
963 ui.status(
964 _(
964 _(
965 b'finished migrating %d changelog revisions; change in size: '
965 b'finished migrating %d changelog revisions; change in size: '
966 b'%s\n'
966 b'%s\n'
967 )
967 )
968 % (crevcount, util.bytecount(cdstsize - csrcsize))
968 % (crevcount, util.bytecount(cdstsize - csrcsize))
969 )
969 )
970
970
971 ui.status(
971 ui.status(
972 _(
972 _(
973 b'finished migrating %d total revisions; total change in store '
973 b'finished migrating %d total revisions; total change in store '
974 b'size: %s\n'
974 b'size: %s\n'
975 )
975 )
976 % (revcount, util.bytecount(dstsize - srcsize))
976 % (revcount, util.bytecount(dstsize - srcsize))
977 )
977 )
978
978
979
979
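For reference, the ``sidedatacompanion`` obtained in ``_clonerevlogs`` is invoked once per cloned revision and, as the test extension at the end of this diff shows, returns a triple of the form ``(dropall, droppedkeys, updates)`` (element names here are illustrative, not part of the API). A minimal no-op companion might look like this:

    def trivialcompanion(rl, rev):
        # keep all existing sidedata, drop no keys, add no new entries
        return False, (), {}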
980 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
980 def _filterstorefile(srcrepo, dstrepo, requirements, path, mode, st):
981 """Determine whether to copy a store file during upgrade.
981 """Determine whether to copy a store file during upgrade.
982
982
983 This function is called when migrating store files from ``srcrepo`` to
983 This function is called when migrating store files from ``srcrepo`` to
984 ``dstrepo`` as part of upgrading a repository.
984 ``dstrepo`` as part of upgrading a repository.
985
985
986 Args:
986 Args:
987 srcrepo: repo we are copying from
987 srcrepo: repo we are copying from
988 dstrepo: repo we are copying to
988 dstrepo: repo we are copying to
989 requirements: set of requirements for ``dstrepo``
989 requirements: set of requirements for ``dstrepo``
990 path: store file being examined
990 path: store file being examined
991 mode: the ``ST_MODE`` file type of ``path``
991 mode: the ``ST_MODE`` file type of ``path``
992 st: ``stat`` data structure for ``path``
992 st: ``stat`` data structure for ``path``
993
993
994 Function should return ``True`` if the file is to be copied.
994 Function should return ``True`` if the file is to be copied.
995 """
995 """
996 # Skip revlogs.
996 # Skip revlogs.
997 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
997 if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
998 return False
998 return False
999 # Skip transaction related files.
999 # Skip transaction related files.
1000 if path.startswith(b'undo'):
1000 if path.startswith(b'undo'):
1001 return False
1001 return False
1002 # Only copy regular files.
1002 # Only copy regular files.
1003 if mode != stat.S_IFREG:
1003 if mode != stat.S_IFREG:
1004 return False
1004 return False
1005 # Skip other skipped files.
1005 # Skip other skipped files.
1006 if path in (b'lock', b'fncache'):
1006 if path in (b'lock', b'fncache'):
1007 return False
1007 return False
1008
1008
1009 return True
1009 return True
1010
1010
1011
1011
1012 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1012 def _finishdatamigration(ui, srcrepo, dstrepo, requirements):
1013 """Hook point for extensions to perform additional actions during upgrade.
1013 """Hook point for extensions to perform additional actions during upgrade.
1014
1014
1015 This function is called after revlogs and store files have been copied but
1015 This function is called after revlogs and store files have been copied but
1016 before the new store is swapped into the original location.
1016 before the new store is swapped into the original location.
1017 """
1017 """
1018
1018
1019
1019
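Since ``_finishdatamigration`` is an explicit hook point, an extension could wrap it with the same ``extensions.wrapfunction`` pattern used by the test extension below. A hedged sketch (note the wrapped name is module-private, so this couples the extension to upgrade internals; the extra status message is illustrative only):

    from mercurial import extensions, upgrade

    def finishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
        orig(ui, srcrepo, dstrepo, requirements)
        ui.status(b'performing extra migration work\n')  # illustrative

    def extsetup(ui):
        extensions.wrapfunction(
            upgrade, '_finishdatamigration', finishdatamigration
        )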
1020 def _upgraderepo(
1020 def _upgraderepo(
1021 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1021 ui, srcrepo, dstrepo, requirements, actions, revlogs=UPGRADE_ALL_REVLOGS
1022 ):
1022 ):
1023 """Do the low-level work of upgrading a repository.
1023 """Do the low-level work of upgrading a repository.
1024
1024
1025 The upgrade is effectively performed as a copy between a source
1025 The upgrade is effectively performed as a copy between a source
1026 repository and a temporary destination repository.
1026 repository and a temporary destination repository.
1027
1027
1028 The source repository is unmodified for as long as possible so the
1028 The source repository is unmodified for as long as possible so the
1029 upgrade can abort at any time without causing loss of service for
1029 upgrade can abort at any time without causing loss of service for
1030 readers and without corrupting the source repository.
1030 readers and without corrupting the source repository.
1031 """
1031 """
1032 assert srcrepo.currentwlock()
1032 assert srcrepo.currentwlock()
1033 assert dstrepo.currentwlock()
1033 assert dstrepo.currentwlock()
1034
1034
1035 ui.status(
1035 ui.status(
1036 _(
1036 _(
1037 b'(it is safe to interrupt this process any time before '
1037 b'(it is safe to interrupt this process any time before '
1038 b'data migration completes)\n'
1038 b'data migration completes)\n'
1039 )
1039 )
1040 )
1040 )
1041
1041
1042 if b're-delta-all' in actions:
1042 if b're-delta-all' in actions:
1043 deltareuse = revlog.revlog.DELTAREUSENEVER
1043 deltareuse = revlog.revlog.DELTAREUSENEVER
1044 elif b're-delta-parent' in actions:
1044 elif b're-delta-parent' in actions:
1045 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1045 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1046 elif b're-delta-multibase' in actions:
1046 elif b're-delta-multibase' in actions:
1047 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1047 deltareuse = revlog.revlog.DELTAREUSESAMEREVS
1048 elif b're-delta-fulladd' in actions:
1048 elif b're-delta-fulladd' in actions:
1049 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1049 deltareuse = revlog.revlog.DELTAREUSEFULLADD
1050 else:
1050 else:
1051 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1051 deltareuse = revlog.revlog.DELTAREUSEALWAYS
1052
1052
1053 with dstrepo.transaction(b'upgrade') as tr:
1053 with dstrepo.transaction(b'upgrade') as tr:
1054 _clonerevlogs(
1054 _clonerevlogs(
1055 ui,
1055 ui,
1056 srcrepo,
1056 srcrepo,
1057 dstrepo,
1057 dstrepo,
1058 tr,
1058 tr,
1059 deltareuse,
1059 deltareuse,
1060 b're-delta-multibase' in actions,
1060 b're-delta-multibase' in actions,
1061 revlogs=revlogs,
1061 revlogs=revlogs,
1062 )
1062 )
1063
1063
1064 # Now copy other files in the store directory.
1064 # Now copy other files in the store directory.
1065 # The sorted() makes execution deterministic.
1065 # The sorted() makes execution deterministic.
1066 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1066 for p, kind, st in sorted(srcrepo.store.vfs.readdir(b'', stat=True)):
1067 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1067 if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
1068 continue
1068 continue
1069
1069
1070 srcrepo.ui.status(_(b'copying %s\n') % p)
1070 srcrepo.ui.status(_(b'copying %s\n') % p)
1071 src = srcrepo.store.rawvfs.join(p)
1071 src = srcrepo.store.rawvfs.join(p)
1072 dst = dstrepo.store.rawvfs.join(p)
1072 dst = dstrepo.store.rawvfs.join(p)
1073 util.copyfile(src, dst, copystat=True)
1073 util.copyfile(src, dst, copystat=True)
1074
1074
1075 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1075 _finishdatamigration(ui, srcrepo, dstrepo, requirements)
1076
1076
1077 ui.status(_(b'data fully migrated to temporary repository\n'))
1077 ui.status(_(b'data fully migrated to temporary repository\n'))
1078
1078
1079 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1079 backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
1080 backupvfs = vfsmod.vfs(backuppath)
1080 backupvfs = vfsmod.vfs(backuppath)
1081
1081
1082 # Make a backup of requires file first, as it is the first to be modified.
1082 # Make a backup of requires file first, as it is the first to be modified.
1083 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1083 util.copyfile(srcrepo.vfs.join(b'requires'), backupvfs.join(b'requires'))
1084
1084
1085 # We install an arbitrary requirement that clients must not support
1085 # We install an arbitrary requirement that clients must not support
1086 # as a mechanism to lock out new clients during the data swap. This is
1086 # as a mechanism to lock out new clients during the data swap. This is
1087 # better than allowing a client to continue while the repository is in
1087 # better than allowing a client to continue while the repository is in
1088 # an inconsistent state.
1088 # an inconsistent state.
1089 ui.status(
1089 ui.status(
1090 _(
1090 _(
1091 b'marking source repository as being upgraded; clients will be '
1091 b'marking source repository as being upgraded; clients will be '
1092 b'unable to read from repository\n'
1092 b'unable to read from repository\n'
1093 )
1093 )
1094 )
1094 )
1095 scmutil.writereporequirements(
1095 scmutil.writereporequirements(
1096 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1096 srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
1097 )
1097 )
1098
1098
1099 ui.status(_(b'starting in-place swap of repository data\n'))
1099 ui.status(_(b'starting in-place swap of repository data\n'))
1100 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1100 ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
1101
1101
1102 # Now swap in the new store directory. Doing it as a rename should make
1102 # Now swap in the new store directory. Doing it as a rename should make
1103 # the operation nearly instantaneous and atomic (at least in well-behaved
1103 # the operation nearly instantaneous and atomic (at least in well-behaved
1104 # environments).
1104 # environments).
1105 ui.status(_(b'replacing store...\n'))
1105 ui.status(_(b'replacing store...\n'))
1106 tstart = util.timer()
1106 tstart = util.timer()
1107 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1107 util.rename(srcrepo.spath, backupvfs.join(b'store'))
1108 util.rename(dstrepo.spath, srcrepo.spath)
1108 util.rename(dstrepo.spath, srcrepo.spath)
1109 elapsed = util.timer() - tstart
1109 elapsed = util.timer() - tstart
1110 ui.status(
1110 ui.status(
1111 _(
1111 _(
1112 b'store replacement complete; repository was inconsistent for '
1112 b'store replacement complete; repository was inconsistent for '
1113 b'%0.1fs\n'
1113 b'%0.1fs\n'
1114 )
1114 )
1115 % elapsed
1115 % elapsed
1116 )
1116 )
1117
1117
1118 # We first write the requirements file. Any new requirements will lock
1118 # We first write the requirements file. Any new requirements will lock
1119 # out legacy clients.
1119 # out legacy clients.
1120 ui.status(
1120 ui.status(
1121 _(
1121 _(
1122 b'finalizing requirements file and making repository readable '
1122 b'finalizing requirements file and making repository readable '
1123 b'again\n'
1123 b'again\n'
1124 )
1124 )
1125 )
1125 )
1126 scmutil.writereporequirements(srcrepo, requirements)
1126 scmutil.writereporequirements(srcrepo, requirements)
1127
1127
1128 # The lock file from the old store won't be removed because nothing has a
1128 # The lock file from the old store won't be removed because nothing has a
1129 # reference to its new location. So clean it up manually. Alternatively, we
1129 # reference to its new location. So clean it up manually. Alternatively, we
1130 # could update srcrepo.svfs and other variables to point to the new
1130 # could update srcrepo.svfs and other variables to point to the new
1131 # location. This is simpler.
1131 # location. This is simpler.
1132 backupvfs.unlink(b'store/lock')
1132 backupvfs.unlink(b'store/lock')
1133
1133
1134 return backuppath
1134 return backuppath
1135
1135
1136
1136
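While the placeholder ``upgradeinprogress`` requirement written above is present in ``.hg/requires``, any other client opening the repository aborts during requirements checking rather than reading an inconsistent store; the failure looks roughly like this (exact wording depends on the Mercurial version):

    abort: repository requires features unknown to this Mercurial: upgradeinprogress!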
1137 def upgraderepo(
1137 def upgraderepo(
1138 ui,
1138 ui,
1139 repo,
1139 repo,
1140 run=False,
1140 run=False,
1141 optimize=None,
1141 optimize=None,
1142 backup=True,
1142 backup=True,
1143 manifest=None,
1143 manifest=None,
1144 changelog=None,
1144 changelog=None,
1145 ):
1145 ):
1146 """Upgrade a repository in place."""
1146 """Upgrade a repository in place."""
1147 if optimize is None:
1147 if optimize is None:
1148 optimize = []
1148 optimize = []
1149 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1149 optimize = {legacy_opts_map.get(o, o) for o in optimize}
1150 repo = repo.unfiltered()
1150 repo = repo.unfiltered()
1151
1151
1152 revlogs = set(UPGRADE_ALL_REVLOGS)
1152 revlogs = set(UPGRADE_ALL_REVLOGS)
1153 specentries = ((b'c', changelog), (b'm', manifest))
1153 specentries = ((b'c', changelog), (b'm', manifest))
1154 specified = [(y, x) for (y, x) in specentries if x is not None]
1154 specified = [(y, x) for (y, x) in specentries if x is not None]
1155 if specified:
1155 if specified:
1156 # the caller restricted which revlogs are to be recloned
1156 # the caller restricted which revlogs are to be recloned
1157 if any(x for y, x in specified):
1157 if any(x for y, x in specified):
1158 revlogs = set()
1158 revlogs = set()
1159 for r, enabled in specified:
1159 for r, enabled in specified:
1160 if enabled:
1160 if enabled:
1161 if r == b'c':
1161 if r == b'c':
1162 revlogs.add(UPGRADE_CHANGELOG)
1162 revlogs.add(UPGRADE_CHANGELOG)
1163 elif r == b'm':
1163 elif r == b'm':
1164 revlogs.add(UPGRADE_MANIFEST)
1164 revlogs.add(UPGRADE_MANIFEST)
1165 else:
1165 else:
1166 # none are enabled
1166 # none are enabled
1167 for r, __ in specified:
1167 for r, __ in specified:
1168 if r == b'c':
1168 if r == b'c':
1169 revlogs.discard(UPGRADE_CHANGELOG)
1169 revlogs.discard(UPGRADE_CHANGELOG)
1170 elif r == b'm':
1170 elif r == b'm':
1171 revlogs.discard(UPGRADE_MANIFEST)
1171 revlogs.discard(UPGRADE_MANIFEST)
1172
1172
1173 # Ensure the repository can be upgraded.
1173 # Ensure the repository can be upgraded.
1174 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1174 missingreqs = requiredsourcerequirements(repo) - repo.requirements
1175 if missingreqs:
1175 if missingreqs:
1176 raise error.Abort(
1176 raise error.Abort(
1177 _(b'cannot upgrade repository; requirement missing: %s')
1177 _(b'cannot upgrade repository; requirement missing: %s')
1178 % _(b', ').join(sorted(missingreqs))
1178 % _(b', ').join(sorted(missingreqs))
1179 )
1179 )
1180
1180
1181 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1181 blockedreqs = blocksourcerequirements(repo) & repo.requirements
1182 if blockedreqs:
1182 if blockedreqs:
1183 raise error.Abort(
1183 raise error.Abort(
1184 _(
1184 _(
1185 b'cannot upgrade repository; unsupported source '
1185 b'cannot upgrade repository; unsupported source '
1186 b'requirement: %s'
1186 b'requirement: %s'
1187 )
1187 )
1188 % _(b', ').join(sorted(blockedreqs))
1188 % _(b', ').join(sorted(blockedreqs))
1189 )
1189 )
1190
1190
1191 # FUTURE there is potentially a need to control the wanted requirements via
1191 # FUTURE there is potentially a need to control the wanted requirements via
1192 # command arguments or via an extension hook point.
1192 # command arguments or via an extension hook point.
1193 newreqs = localrepo.newreporequirements(
1193 newreqs = localrepo.newreporequirements(
1194 repo.ui, localrepo.defaultcreateopts(repo.ui)
1194 repo.ui, localrepo.defaultcreateopts(repo.ui)
1195 )
1195 )
1196 newreqs.update(preservedrequirements(repo))
1196 newreqs.update(preservedrequirements(repo))
1197
1197
1198 noremovereqs = (
1198 noremovereqs = (
1199 repo.requirements - newreqs - supportremovedrequirements(repo)
1199 repo.requirements - newreqs - supportremovedrequirements(repo)
1200 )
1200 )
1201 if noremovereqs:
1201 if noremovereqs:
1202 raise error.Abort(
1202 raise error.Abort(
1203 _(
1203 _(
1204 b'cannot upgrade repository; requirement would be '
1204 b'cannot upgrade repository; requirement would be '
1205 b'removed: %s'
1205 b'removed: %s'
1206 )
1206 )
1207 % _(b', ').join(sorted(noremovereqs))
1207 % _(b', ').join(sorted(noremovereqs))
1208 )
1208 )
1209
1209
1210 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1210 noaddreqs = newreqs - repo.requirements - allowednewrequirements(repo)
1211 if noaddreqs:
1211 if noaddreqs:
1212 raise error.Abort(
1212 raise error.Abort(
1213 _(
1213 _(
1214 b'cannot upgrade repository; do not support adding '
1214 b'cannot upgrade repository; do not support adding '
1215 b'requirement: %s'
1215 b'requirement: %s'
1216 )
1216 )
1217 % _(b', ').join(sorted(noaddreqs))
1217 % _(b', ').join(sorted(noaddreqs))
1218 )
1218 )
1219
1219
1220 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1220 unsupportedreqs = newreqs - supporteddestrequirements(repo)
1221 if unsupportedreqs:
1221 if unsupportedreqs:
1222 raise error.Abort(
1222 raise error.Abort(
1223 _(
1223 _(
1224 b'cannot upgrade repository; do not support '
1224 b'cannot upgrade repository; do not support '
1225 b'destination requirement: %s'
1225 b'destination requirement: %s'
1226 )
1226 )
1227 % _(b', ').join(sorted(unsupportedreqs))
1227 % _(b', ').join(sorted(unsupportedreqs))
1228 )
1228 )
1229
1229
1230 # Find and validate all improvements that can be made.
1230 # Find and validate all improvements that can be made.
1231 alloptimizations = findoptimizations(repo)
1231 alloptimizations = findoptimizations(repo)
1232
1232
1233 # Apply and validate arguments.
1233 # Apply and validate arguments.
1234 optimizations = []
1234 optimizations = []
1235 for o in alloptimizations:
1235 for o in alloptimizations:
1236 if o.name in optimize:
1236 if o.name in optimize:
1237 optimizations.append(o)
1237 optimizations.append(o)
1238 optimize.discard(o.name)
1238 optimize.discard(o.name)
1239
1239
1240 if optimize: # anything left is unknown
1240 if optimize: # anything left is unknown
1241 raise error.Abort(
1241 raise error.Abort(
1242 _(b'unknown optimization action requested: %s')
1242 _(b'unknown optimization action requested: %s')
1243 % b', '.join(sorted(optimize)),
1243 % b', '.join(sorted(optimize)),
1244 hint=_(b'run without arguments to see valid optimizations'),
1244 hint=_(b'run without arguments to see valid optimizations'),
1245 )
1245 )
1246
1246
1247 deficiencies = finddeficiencies(repo)
1247 deficiencies = finddeficiencies(repo)
1248 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1248 actions = determineactions(repo, deficiencies, repo.requirements, newreqs)
1249 actions.extend(
1249 actions.extend(
1250 o
1250 o
1251 for o in sorted(optimizations)
1251 for o in sorted(optimizations)
1252 # determineactions could have added some optimisations already
1252 # determineactions could have added some optimisations already
1253 if o not in actions
1253 if o not in actions
1254 )
1254 )
1255
1255
1256 removedreqs = repo.requirements - newreqs
1256 removedreqs = repo.requirements - newreqs
1257 addedreqs = newreqs - repo.requirements
1257 addedreqs = newreqs - repo.requirements
1258
1258
1259 if revlogs != UPGRADE_ALL_REVLOGS:
1259 if revlogs != UPGRADE_ALL_REVLOGS:
1260 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1260 incompatible = RECLONES_REQUIREMENTS & (removedreqs | addedreqs)
1261 if incompatible:
1261 if incompatible:
1262 msg = _(
1262 msg = _(
1263 b'ignoring revlogs selection flags, format requirements '
1263 b'ignoring revlogs selection flags, format requirements '
1264 b'change: %s\n'
1264 b'change: %s\n'
1265 )
1265 )
1266 ui.warn(msg % b', '.join(sorted(incompatible)))
1266 ui.warn(msg % b', '.join(sorted(incompatible)))
1267 revlogs = UPGRADE_ALL_REVLOGS
1267 revlogs = UPGRADE_ALL_REVLOGS
1268
1268
1269 def write_labeled(l, label):
1269 def write_labeled(l, label):
1270 first = True
1270 first = True
1271 for r in sorted(l):
1271 for r in sorted(l):
1272 if not first:
1272 if not first:
1273 ui.write(b', ')
1273 ui.write(b', ')
1274 ui.write(r, label=label)
1274 ui.write(r, label=label)
1275 first = False
1275 first = False
1276
1276
1277 def printrequirements():
1277 def printrequirements():
1278 ui.write(_(b'requirements\n'))
1278 ui.write(_(b'requirements\n'))
1279 ui.write(_(b' preserved: '))
1279 ui.write(_(b' preserved: '))
1280 write_labeled(
1280 write_labeled(
1281 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1281 newreqs & repo.requirements, "upgrade-repo.requirement.preserved"
1282 )
1282 )
1283 ui.write((b'\n'))
1283 ui.write((b'\n'))
1284 removed = repo.requirements - newreqs
1284 removed = repo.requirements - newreqs
1285 if removed:
1285 if removed:
1286 ui.write(_(b' removed: '))
1286 ui.write(_(b' removed: '))
1287 write_labeled(removed, "upgrade-repo.requirement.removed")
1287 write_labeled(removed, "upgrade-repo.requirement.removed")
1288 ui.write((b'\n'))
1288 ui.write((b'\n'))
1289 added = newreqs - repo.requirements
1289 added = newreqs - repo.requirements
1290 if added:
1290 if added:
1291 ui.write(_(b' added: '))
1291 ui.write(_(b' added: '))
1292 write_labeled(added, "upgrade-repo.requirement.added")
1292 write_labeled(added, "upgrade-repo.requirement.added")
1293 ui.write((b'\n'))
1293 ui.write((b'\n'))
1294 ui.write(b'\n')
1294 ui.write(b'\n')
1295
1295
1296 def printoptimisations():
1296 def printoptimisations():
1297 optimisations = [a for a in actions if a.type == optimisation]
1297 optimisations = [a for a in actions if a.type == optimisation]
1298 optimisations.sort(key=lambda a: a.name)
1298 optimisations.sort(key=lambda a: a.name)
1299 if optimisations:
1299 if optimisations:
1300 ui.write(_(b'optimisations: '))
1300 ui.write(_(b'optimisations: '))
1301 write_labeled(
1301 write_labeled(
1302 [a.name for a in optimisations],
1302 [a.name for a in optimisations],
1303 "upgrade-repo.optimisation.performed",
1303 "upgrade-repo.optimisation.performed",
1304 )
1304 )
1305 ui.write(b'\n\n')
1305 ui.write(b'\n\n')
1306
1306
1307 def printupgradeactions():
1307 def printupgradeactions():
1308 for a in actions:
1308 for a in actions:
1309 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1309 ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
1310
1310
1311 if not run:
1311 if not run:
1312 fromconfig = []
1312 fromconfig = []
1313 onlydefault = []
1313 onlydefault = []
1314
1314
1315 for d in deficiencies:
1315 for d in deficiencies:
1316 if d.fromconfig(repo):
1316 if d.fromconfig(repo):
1317 fromconfig.append(d)
1317 fromconfig.append(d)
1318 elif d.default:
1318 elif d.default:
1319 onlydefault.append(d)
1319 onlydefault.append(d)
1320
1320
1321 if fromconfig or onlydefault:
1321 if fromconfig or onlydefault:
1322
1322
1323 if fromconfig:
1323 if fromconfig:
1324 ui.status(
1324 ui.status(
1325 _(
1325 _(
1326 b'repository lacks features recommended by '
1326 b'repository lacks features recommended by '
1327 b'current config options:\n\n'
1327 b'current config options:\n\n'
1328 )
1328 )
1329 )
1329 )
1330 for i in fromconfig:
1330 for i in fromconfig:
1331 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1331 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1332
1332
1333 if onlydefault:
1333 if onlydefault:
1334 ui.status(
1334 ui.status(
1335 _(
1335 _(
1336 b'repository lacks features used by the default '
1336 b'repository lacks features used by the default '
1337 b'config options:\n\n'
1337 b'config options:\n\n'
1338 )
1338 )
1339 )
1339 )
1340 for i in onlydefault:
1340 for i in onlydefault:
1341 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1341 ui.status(b'%s\n %s\n\n' % (i.name, i.description))
1342
1342
1343 ui.status(b'\n')
1343 ui.status(b'\n')
1344 else:
1344 else:
1345 ui.status(
1345 ui.status(
1346 _(
1346 _(
1347 b'(no feature deficiencies found in existing '
1347 b'(no feature deficiencies found in existing '
1348 b'repository)\n'
1348 b'repository)\n'
1349 )
1349 )
1350 )
1350 )
1351
1351
1352 ui.status(
1352 ui.status(
1353 _(
1353 _(
1354 b'performing an upgrade with "--run" will make the following '
1354 b'performing an upgrade with "--run" will make the following '
1355 b'changes:\n\n'
1355 b'changes:\n\n'
1356 )
1356 )
1357 )
1357 )
1358
1358
1359 printrequirements()
1359 printrequirements()
1360 printoptimisations()
1360 printoptimisations()
1361 printupgradeactions()
1361 printupgradeactions()
1362
1362
1363 unusedoptimize = [i for i in alloptimizations if i not in actions]
1363 unusedoptimize = [i for i in alloptimizations if i not in actions]
1364
1364
1365 if unusedoptimize:
1365 if unusedoptimize:
1366 ui.status(
1366 ui.status(
1367 _(
1367 _(
1368 b'additional optimizations are available by specifying '
1368 b'additional optimizations are available by specifying '
1369 b'"--optimize <name>":\n\n'
1369 b'"--optimize <name>":\n\n'
1370 )
1370 )
1371 )
1371 )
1372 for i in unusedoptimize:
1372 for i in unusedoptimize:
1373 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
1373 ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
1374 return
1374 return
1375
1375
1376 # Else we're in the run=true case.
1376 # Else we're in the run=true case.
1377 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1377 ui.write(_(b'upgrade will perform the following actions:\n\n'))
1378 printrequirements()
1378 printrequirements()
1379 printoptimisations()
1379 printoptimisations()
1380 printupgradeactions()
1380 printupgradeactions()
1381
1381
1382 upgradeactions = [a.name for a in actions]
1382 upgradeactions = [a.name for a in actions]
1383
1383
1384 ui.status(_(b'beginning upgrade...\n'))
1384 ui.status(_(b'beginning upgrade...\n'))
1385 with repo.wlock(), repo.lock():
1385 with repo.wlock(), repo.lock():
1386 ui.status(_(b'repository locked and read-only\n'))
1386 ui.status(_(b'repository locked and read-only\n'))
1387 # Our strategy for upgrading the repository is to create a new,
1387 # Our strategy for upgrading the repository is to create a new,
1388 # temporary repository, write data to it, then do a swap of the
1388 # temporary repository, write data to it, then do a swap of the
1389 # data. There are less heavyweight ways to do this, but it is easier
1389 # data. There are less heavyweight ways to do this, but it is easier
1390 # to create a new repo object than to instantiate all the components
1390 # to create a new repo object than to instantiate all the components
1391 # (like the store) separately.
1391 # (like the store) separately.
1392 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1392 tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
1393 backuppath = None
1393 backuppath = None
1394 try:
1394 try:
1395 ui.status(
1395 ui.status(
1396 _(
1396 _(
1397 b'creating temporary repository to stage migrated '
1397 b'creating temporary repository to stage migrated '
1398 b'data: %s\n'
1398 b'data: %s\n'
1399 )
1399 )
1400 % tmppath
1400 % tmppath
1401 )
1401 )
1402
1402
1403 # clone ui without using ui.copy because repo.ui is protected
1403 # clone ui without using ui.copy because repo.ui is protected
1404 repoui = repo.ui.__class__(repo.ui)
1404 repoui = repo.ui.__class__(repo.ui)
1405 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1405 dstrepo = hg.repository(repoui, path=tmppath, create=True)
1406
1406
1407 with dstrepo.wlock(), dstrepo.lock():
1407 with dstrepo.wlock(), dstrepo.lock():
1408 backuppath = _upgraderepo(
1408 backuppath = _upgraderepo(
1409 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1409 ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
1410 )
1410 )
1411 if not (backup or backuppath is None):
1411 if not (backup or backuppath is None):
1412 ui.status(
1412 ui.status(
1413 _(b'removing old repository content %s\n') % backuppath
1413 _(b'removing old repository content %s\n') % backuppath
1414 )
1414 )
1415 repo.vfs.rmtree(backuppath, forcibly=True)
1415 repo.vfs.rmtree(backuppath, forcibly=True)
1416 backuppath = None
1416 backuppath = None
1417
1417
1418 finally:
1418 finally:
1419 ui.status(_(b'removing temporary repository %s\n') % tmppath)
1419 ui.status(_(b'removing temporary repository %s\n') % tmppath)
1420 repo.vfs.rmtree(tmppath, forcibly=True)
1420 repo.vfs.rmtree(tmppath, forcibly=True)
1421
1421
1422 if backuppath and not ui.quiet:
1422 if backuppath and not ui.quiet:
1423 ui.warn(
1423 ui.warn(
1424 _(b'copy of old repository backed up at %s\n') % backuppath
1424 _(b'copy of old repository backed up at %s\n') % backuppath
1425 )
1425 )
1426 ui.warn(
1426 ui.warn(
1427 _(
1427 _(
1428 b'the old repository will not be deleted; remove '
1428 b'the old repository will not be deleted; remove '
1429 b'it to free up disk space once the upgraded '
1429 b'it to free up disk space once the upgraded '
1430 b'repository is verified\n'
1430 b'repository is verified\n'
1431 )
1431 )
1432 )
1432 )
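For context, ``upgraderepo`` above is the entry point behind the ``hg debugupgraderepo`` command. A typical session might look like the following (the exact flag set depends on the Mercurial version; ``--no-backup`` corresponds to the ``backup`` parameter above):

    $ hg debugupgraderepo                          # dry run: report requirements and actions
    $ hg debugupgraderepo --optimize re-delta-parent --run
    $ hg debugupgraderepo --run --no-backup        # discard the old store once swapped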
@@ -1,83 +1,83 b''
1 # ext-sidedata.py - small extension to test the sidedata logic
1 # ext-sidedata.py - small extension to test the sidedata logic
2 #
2 #
3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import struct
11 import struct
12
12
13 from mercurial import (
13 from mercurial import (
14 extensions,
14 extensions,
15 localrepo,
16 node,
15 node,
16 requirements,
17 revlog,
17 revlog,
18 upgrade,
18 upgrade,
19 )
19 )
20
20
21 from mercurial.revlogutils import sidedata
21 from mercurial.revlogutils import sidedata
22
22
23
23
24 def wrapaddrevision(
24 def wrapaddrevision(
25 orig, self, text, transaction, link, p1, p2, *args, **kwargs
25 orig, self, text, transaction, link, p1, p2, *args, **kwargs
26 ):
26 ):
27 if kwargs.get('sidedata') is None:
27 if kwargs.get('sidedata') is None:
28 kwargs['sidedata'] = {}
28 kwargs['sidedata'] = {}
29 sd = kwargs['sidedata']
29 sd = kwargs['sidedata']
30 ## let's store some arbitrary data just for testing
30 ## let's store some arbitrary data just for testing
31 # text length
31 # text length
32 sd[sidedata.SD_TEST1] = struct.pack('>I', len(text))
32 sd[sidedata.SD_TEST1] = struct.pack('>I', len(text))
33 # and sha2 hashes
33 # and sha2 hashes
34 sha256 = hashlib.sha256(text).digest()
34 sha256 = hashlib.sha256(text).digest()
35 sd[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
35 sd[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
36 return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
36 return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
37
37
38
38
39 def wraprevision(orig, self, nodeorrev, *args, **kwargs):
39 def wraprevision(orig, self, nodeorrev, *args, **kwargs):
40 text = orig(self, nodeorrev, *args, **kwargs)
40 text = orig(self, nodeorrev, *args, **kwargs)
41 if getattr(self, 'sidedatanocheck', False):
41 if getattr(self, 'sidedatanocheck', False):
42 return text
42 return text
43 if nodeorrev != node.nullrev and nodeorrev != node.nullid:
43 if nodeorrev != node.nullrev and nodeorrev != node.nullid:
44 sd = self.sidedata(nodeorrev)
44 sd = self.sidedata(nodeorrev)
45 if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]:
45 if len(text) != struct.unpack('>I', sd[sidedata.SD_TEST1])[0]:
46 raise RuntimeError('text size mismatch')
46 raise RuntimeError('text size mismatch')
47 expected = sd[sidedata.SD_TEST2]
47 expected = sd[sidedata.SD_TEST2]
48 got = hashlib.sha256(text).digest()
48 got = hashlib.sha256(text).digest()
49 if got != expected:
49 if got != expected:
50 raise RuntimeError('sha256 mismatch')
50 raise RuntimeError('sha256 mismatch')
51 return text
51 return text
52
52
53
53
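The two test entries written by ``wrapaddrevision`` are a big-endian 32-bit text length and a raw 32-byte sha256 digest, so they decode symmetrically. A sketch, where ``sd`` stands for a sidedata mapping as returned by ``self.sidedata(...)``:

    import struct

    length = struct.unpack('>I', sd[sidedata.SD_TEST1])[0]
    digest = struct.unpack('>32s', sd[sidedata.SD_TEST2])[0]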
54 def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
54 def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
55 sidedatacompanion = orig(srcrepo, dstrepo)
55 sidedatacompanion = orig(srcrepo, dstrepo)
56 addedreqs = dstrepo.requirements - srcrepo.requirements
56 addedreqs = dstrepo.requirements - srcrepo.requirements
57 if localrepo.SIDEDATA_REQUIREMENT in addedreqs:
57 if requirements.SIDEDATA_REQUIREMENT in addedreqs:
58 assert sidedatacompanion is None # deal with composition later
58 assert sidedatacompanion is None # deal with composition later
59
59
60 def sidedatacompanion(revlog, rev):
60 def sidedatacompanion(revlog, rev):
61 update = {}
61 update = {}
62 revlog.sidedatanocheck = True
62 revlog.sidedatanocheck = True
63 try:
63 try:
64 text = revlog.revision(rev)
64 text = revlog.revision(rev)
65 finally:
65 finally:
66 del revlog.sidedatanocheck
66 del revlog.sidedatanocheck
67 ## let's store some arbitrary data just for testing
67 ## let's store some arbitrary data just for testing
68 # text length
68 # text length
69 update[sidedata.SD_TEST1] = struct.pack('>I', len(text))
69 update[sidedata.SD_TEST1] = struct.pack('>I', len(text))
70 # and sha2 hashes
70 # and sha2 hashes
71 sha256 = hashlib.sha256(text).digest()
71 sha256 = hashlib.sha256(text).digest()
72 update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
72 update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
73 return False, (), update
73 return False, (), update
74
74
75 return sidedatacompanion
75 return sidedatacompanion
76
76
77
77
78 def extsetup(ui):
78 def extsetup(ui):
79 extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
79 extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
80 extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
80 extensions.wrapfunction(revlog.revlog, 'revision', wraprevision)
81 extensions.wrapfunction(
81 extensions.wrapfunction(
82 upgrade, 'getsidedatacompanion', wrapgetsidedatacompanion
82 upgrade, 'getsidedatacompanion', wrapgetsidedatacompanion
83 )
83 )
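Being a test helper, this extension is enabled like any other, for example via ``--config`` (the path shown is illustrative; in the Mercurial tree the file lives under the tests directory):

    $ hg --config extensions.ext-sidedata=tests/testlib/ext-sidedata.py debugupgraderepo --run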