##// END OF EJS Templates
changelog: also monitor `00changelog.n` when applicable (issue6554)...
marmoute -
r48853:c094e829 stable
parent child Browse files
Show More
@@ -1,3851 +1,3866 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 sha1nodeconstants,
23 sha1nodeconstants,
24 short,
24 short,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 delattr,
27 delattr,
28 getattr,
28 getattr,
29 )
29 )
30 from . import (
30 from . import (
31 bookmarks,
31 bookmarks,
32 branchmap,
32 branchmap,
33 bundle2,
33 bundle2,
34 bundlecaches,
34 bundlecaches,
35 changegroup,
35 changegroup,
36 color,
36 color,
37 commit,
37 commit,
38 context,
38 context,
39 dirstate,
39 dirstate,
40 dirstateguard,
40 dirstateguard,
41 discovery,
41 discovery,
42 encoding,
42 encoding,
43 error,
43 error,
44 exchange,
44 exchange,
45 extensions,
45 extensions,
46 filelog,
46 filelog,
47 hook,
47 hook,
48 lock as lockmod,
48 lock as lockmod,
49 match as matchmod,
49 match as matchmod,
50 mergestate as mergestatemod,
50 mergestate as mergestatemod,
51 mergeutil,
51 mergeutil,
52 namespaces,
52 namespaces,
53 narrowspec,
53 narrowspec,
54 obsolete,
54 obsolete,
55 pathutil,
55 pathutil,
56 phases,
56 phases,
57 pushkey,
57 pushkey,
58 pycompat,
58 pycompat,
59 rcutil,
59 rcutil,
60 repoview,
60 repoview,
61 requirements as requirementsmod,
61 requirements as requirementsmod,
62 revlog,
62 revlog,
63 revset,
63 revset,
64 revsetlang,
64 revsetlang,
65 scmutil,
65 scmutil,
66 sparse,
66 sparse,
67 store as storemod,
67 store as storemod,
68 subrepoutil,
68 subrepoutil,
69 tags as tagsmod,
69 tags as tagsmod,
70 transaction,
70 transaction,
71 txnutil,
71 txnutil,
72 util,
72 util,
73 vfs as vfsmod,
73 vfs as vfsmod,
74 wireprototypes,
74 wireprototypes,
75 )
75 )
76
76
77 from .interfaces import (
77 from .interfaces import (
78 repository,
78 repository,
79 util as interfaceutil,
79 util as interfaceutil,
80 )
80 )
81
81
82 from .utils import (
82 from .utils import (
83 hashutil,
83 hashutil,
84 procutil,
84 procutil,
85 stringutil,
85 stringutil,
86 urlutil,
86 urlutil,
87 )
87 )
88
88
89 from .revlogutils import (
89 from .revlogutils import (
90 concurrency_checker as revlogchecker,
90 concurrency_checker as revlogchecker,
91 constants as revlogconst,
91 constants as revlogconst,
92 sidedata as sidedatamod,
92 sidedata as sidedatamod,
93 )
93 )
94
94
# Module-level convenience aliases kept for historical callers.
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
98
98
99 # set of (path, vfs-location) tuples. vfs-location is:
99 # set of (path, vfs-location) tuples. vfs-location is:
100 # - 'plain for vfs relative paths
100 # - 'plain for vfs relative paths
101 # - '' for svfs relative paths
101 # - '' for svfs relative paths
102 _cachedfiles = set()
102 _cachedfiles = set()
103
103
104
104
class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        # use a private sentinel so a legitimately-cached falsy value is
        # still treated as a hit
        missing = object()
        cached = unfi.__dict__.get(self.sname, missing)
        if cached is not missing:
            return cached
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        # always store on the unfiltered repo
        return super(_basefilecache, self).set(repo.unfiltered(), value)
121
121
122
122
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # register each tracked file as a plain (working .hg vfs) path
        for p in paths:
            _cachedfiles.add((p, b'plain'))

    def join(self, obj, fname):
        # resolve against the repository's main (.hg) vfs
        return obj.vfs.join(fname)
133
133
134
134
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # register each tracked file as a store-relative ('') path
        _cachedfiles.update((p, b'') for p in paths)

    def join(self, obj, fname):
        # resolve against the store vfs
        return obj.sjoin(fname)
145
145
146
146
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        # Register both the changelog index and the persistent-nodemap
        # docket so cache invalidation knows about each of them.
        for fname in (b'00changelog.i', b'00changelog.n'):
            _cachedfiles.add((fname, b''))

    def tracked_paths(self, obj):
        # The index is always monitored; the nodemap docket
        # (00changelog.n) only matters when the persistent nodemap
        # feature is enabled for this store.
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths
160
161
class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        # each entry is a (fname, location) pair; location selects the vfs
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)
166
181
167
182
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
177
192
178
193
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # accessed through a filtered view: delegate to the attribute
            # on the unfiltered repo so the value gets cached there
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
187
202
188
203
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # bypass any __setattr__ override so the cached value lands
        # directly in the instance __dict__
        object.__setattr__(obj, self.name, value)
194
209
195
210
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # vars(x) is x.__dict__; probe the unfiltered repo's instance dict
    return name in repo.unfiltered().__dict__
199
214
200
215
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered view
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)

    return wrapper
209
224
210
225
# Wire-protocol capabilities advertised by modern local peers.
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
# Legacy peers additionally understand the old changegroupsubset command.
legacycaps = moderncaps.union({b'changegroupsubset'})
220
235
221
236
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    """Command executor that calls methods on a local peer directly."""

    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # enforce the executor life cycle before dispatching
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # No batching or wire encoding needed locally: look up the named
        # method on the peer, call it synchronously, and hand back an
        # already-resolved future.
        method = getattr(self._peer, pycompat.sysstr(command))

        future = pycompat.futures.Future()

        try:
            res = method(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(future, sys.exc_info()[1:])
        else:
            future.set_result(res)

        return future

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
266
281
267
282
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # serve the filtered view, as a remote would
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        # getbundlechunks() returns a (info, chunk-iterator) pair; only
        # the chunks matter here.
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if not exchange.bundle2requested(bundlecaps):
            return changegroup.getunbundler(b'01', cb, None)
        # When requesting a bundle2, getbundle returns a stream to make the
        # wire level function happier. We need to build a proper object
        # from it in local peer.
        return bundle2.getunbundler(self.ui, cb)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    buf = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, buf)
                return ret
            except Exception as exc:
                # If the exception carries output salvaged from a bundle2
                # reply, make sure it is printed before continuing to fail:
                # build a bundle2 containing that output and consume it
                # directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                salvaged = getattr(exc, '_bundle2salvagedoutput', ())
                if salvaged:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for part in salvaged:
                        bundler.addpart(part)
                    buf = util.chunkbuffer(bundler.getchunks())
                    unbundler = bundle2.getunbundler(self.ui, buf)
                    bundle2.processbundle(self._repo, unbundler)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
417
432
418
433
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # legacy full changegroup: everything missing from the given
        # roots up to the current repository heads
        out = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, out, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        out = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, out, b'01', source)

    # End of baselegacywirecommands interface.
448
463
449
464
# Extensions may register callables taking (ui, features) here to influence
# which repository requirements can be loaded. Only functions defined in
# currently-loaded extensions are invoked.
#
# Each callable receives the set of requirement strings the repository is
# capable of opening and typically adds entries for requirements the
# extension knows how to handle.
featuresetupfuncs = set()
458
473
459
474
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    relative = requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    if relative:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs
483
498
484
499
485 def _readrequires(vfs, allowmissing):
500 def _readrequires(vfs, allowmissing):
486 """reads the require file present at root of this vfs
501 """reads the require file present at root of this vfs
487 and return a set of requirements
502 and return a set of requirements
488
503
489 If allowmissing is True, we suppress ENOENT if raised"""
504 If allowmissing is True, we suppress ENOENT if raised"""
490 # requires file contains a newline-delimited list of
505 # requires file contains a newline-delimited list of
491 # features/capabilities the opener (us) must have in order to use
506 # features/capabilities the opener (us) must have in order to use
492 # the repository. This file was introduced in Mercurial 0.9.2,
507 # the repository. This file was introduced in Mercurial 0.9.2,
493 # which means very old repositories may not have one. We assume
508 # which means very old repositories may not have one. We assume
494 # a missing file translates to no requirements.
509 # a missing file translates to no requirements.
495 try:
510 try:
496 requirements = set(vfs.read(b'requires').splitlines())
511 requirements = set(vfs.read(b'requires').splitlines())
497 except IOError as e:
512 except IOError as e:
498 if not (allowmissing and e.errno == errno.ENOENT):
513 if not (allowmissing and e.errno == errno.ENOENT):
499 raise
514 raise
500 requirements = set()
515 requirements = set()
501 return requirements
516 return requirements
502
517
503
518
504 def makelocalrepository(baseui, path, intents=None):
519 def makelocalrepository(baseui, path, intents=None):
505 """Create a local repository object.
520 """Create a local repository object.
506
521
507 Given arguments needed to construct a local repository, this function
522 Given arguments needed to construct a local repository, this function
508 performs various early repository loading functionality (such as
523 performs various early repository loading functionality (such as
509 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
524 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
510 the repository can be opened, derives a type suitable for representing
525 the repository can be opened, derives a type suitable for representing
511 that repository, and returns an instance of it.
526 that repository, and returns an instance of it.
512
527
513 The returned object conforms to the ``repository.completelocalrepository``
528 The returned object conforms to the ``repository.completelocalrepository``
514 interface.
529 interface.
515
530
516 The repository type is derived by calling a series of factory functions
531 The repository type is derived by calling a series of factory functions
517 for each aspect/interface of the final repository. These are defined by
532 for each aspect/interface of the final repository. These are defined by
518 ``REPO_INTERFACES``.
533 ``REPO_INTERFACES``.
519
534
520 Each factory function is called to produce a type implementing a specific
535 Each factory function is called to produce a type implementing a specific
521 interface. The cumulative list of returned types will be combined into a
536 interface. The cumulative list of returned types will be combined into a
522 new type and that type will be instantiated to represent the local
537 new type and that type will be instantiated to represent the local
523 repository.
538 repository.
524
539
525 The factory functions each receive various state that may be consulted
540 The factory functions each receive various state that may be consulted
526 as part of deriving a type.
541 as part of deriving a type.
527
542
528 Extensions should wrap these factory functions to customize repository type
543 Extensions should wrap these factory functions to customize repository type
529 creation. Note that an extension's wrapped function may be called even if
544 creation. Note that an extension's wrapped function may be called even if
530 that extension is not loaded for the repo being constructed. Extensions
545 that extension is not loaded for the repo being constructed. Extensions
531 should check if their ``__name__`` appears in the
546 should check if their ``__name__`` appears in the
532 ``extensionmodulenames`` set passed to the factory function and no-op if
547 ``extensionmodulenames`` set passed to the factory function and no-op if
533 not.
548 not.
534 """
549 """
535 ui = baseui.copy()
550 ui = baseui.copy()
536 # Prevent copying repo configuration.
551 # Prevent copying repo configuration.
537 ui.copy = baseui.copy
552 ui.copy = baseui.copy
538
553
539 # Working directory VFS rooted at repository root.
554 # Working directory VFS rooted at repository root.
540 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
555 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
541
556
542 # Main VFS for .hg/ directory.
557 # Main VFS for .hg/ directory.
543 hgpath = wdirvfs.join(b'.hg')
558 hgpath = wdirvfs.join(b'.hg')
544 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
559 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
545 # Whether this repository is shared one or not
560 # Whether this repository is shared one or not
546 shared = False
561 shared = False
547 # If this repository is shared, vfs pointing to shared repo
562 # If this repository is shared, vfs pointing to shared repo
548 sharedvfs = None
563 sharedvfs = None
549
564
550 # The .hg/ path should exist and should be a directory. All other
565 # The .hg/ path should exist and should be a directory. All other
551 # cases are errors.
566 # cases are errors.
552 if not hgvfs.isdir():
567 if not hgvfs.isdir():
553 try:
568 try:
554 hgvfs.stat()
569 hgvfs.stat()
555 except OSError as e:
570 except OSError as e:
556 if e.errno != errno.ENOENT:
571 if e.errno != errno.ENOENT:
557 raise
572 raise
558 except ValueError as e:
573 except ValueError as e:
559 # Can be raised on Python 3.8 when path is invalid.
574 # Can be raised on Python 3.8 when path is invalid.
560 raise error.Abort(
575 raise error.Abort(
561 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
576 _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
562 )
577 )
563
578
564 raise error.RepoError(_(b'repository %s not found') % path)
579 raise error.RepoError(_(b'repository %s not found') % path)
565
580
566 requirements = _readrequires(hgvfs, True)
581 requirements = _readrequires(hgvfs, True)
567 shared = (
582 shared = (
568 requirementsmod.SHARED_REQUIREMENT in requirements
583 requirementsmod.SHARED_REQUIREMENT in requirements
569 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
584 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
570 )
585 )
571 storevfs = None
586 storevfs = None
572 if shared:
587 if shared:
573 # This is a shared repo
588 # This is a shared repo
574 sharedvfs = _getsharedvfs(hgvfs, requirements)
589 sharedvfs = _getsharedvfs(hgvfs, requirements)
575 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
590 storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
576 else:
591 else:
577 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
592 storevfs = vfsmod.vfs(hgvfs.join(b'store'))
578
593
579 # if .hg/requires contains the sharesafe requirement, it means
594 # if .hg/requires contains the sharesafe requirement, it means
580 # there exists a `.hg/store/requires` too and we should read it
595 # there exists a `.hg/store/requires` too and we should read it
581 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
596 # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
582 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
597 # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
583 # is not present, refer checkrequirementscompat() for that
598 # is not present, refer checkrequirementscompat() for that
584 #
599 #
585 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
600 # However, if SHARESAFE_REQUIREMENT is not present, it means that the
586 # repository was shared the old way. We check the share source .hg/requires
601 # repository was shared the old way. We check the share source .hg/requires
587 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
602 # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
588 # to be reshared
603 # to be reshared
589 hint = _(b"see `hg help config.format.use-share-safe` for more information")
604 hint = _(b"see `hg help config.format.use-share-safe` for more information")
590 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
605 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
591
606
592 if (
607 if (
593 shared
608 shared
594 and requirementsmod.SHARESAFE_REQUIREMENT
609 and requirementsmod.SHARESAFE_REQUIREMENT
595 not in _readrequires(sharedvfs, True)
610 not in _readrequires(sharedvfs, True)
596 ):
611 ):
597 mismatch_warn = ui.configbool(
612 mismatch_warn = ui.configbool(
598 b'share', b'safe-mismatch.source-not-safe.warn'
613 b'share', b'safe-mismatch.source-not-safe.warn'
599 )
614 )
600 mismatch_config = ui.config(
615 mismatch_config = ui.config(
601 b'share', b'safe-mismatch.source-not-safe'
616 b'share', b'safe-mismatch.source-not-safe'
602 )
617 )
603 if mismatch_config in (
618 if mismatch_config in (
604 b'downgrade-allow',
619 b'downgrade-allow',
605 b'allow',
620 b'allow',
606 b'downgrade-abort',
621 b'downgrade-abort',
607 ):
622 ):
608 # prevent cyclic import localrepo -> upgrade -> localrepo
623 # prevent cyclic import localrepo -> upgrade -> localrepo
609 from . import upgrade
624 from . import upgrade
610
625
611 upgrade.downgrade_share_to_non_safe(
626 upgrade.downgrade_share_to_non_safe(
612 ui,
627 ui,
613 hgvfs,
628 hgvfs,
614 sharedvfs,
629 sharedvfs,
615 requirements,
630 requirements,
616 mismatch_config,
631 mismatch_config,
617 mismatch_warn,
632 mismatch_warn,
618 )
633 )
619 elif mismatch_config == b'abort':
634 elif mismatch_config == b'abort':
620 raise error.Abort(
635 raise error.Abort(
621 _(b"share source does not support share-safe requirement"),
636 _(b"share source does not support share-safe requirement"),
622 hint=hint,
637 hint=hint,
623 )
638 )
624 else:
639 else:
625 raise error.Abort(
640 raise error.Abort(
626 _(
641 _(
627 b"share-safe mismatch with source.\nUnrecognized"
642 b"share-safe mismatch with source.\nUnrecognized"
628 b" value '%s' of `share.safe-mismatch.source-not-safe`"
643 b" value '%s' of `share.safe-mismatch.source-not-safe`"
629 b" set."
644 b" set."
630 )
645 )
631 % mismatch_config,
646 % mismatch_config,
632 hint=hint,
647 hint=hint,
633 )
648 )
634 else:
649 else:
635 requirements |= _readrequires(storevfs, False)
650 requirements |= _readrequires(storevfs, False)
636 elif shared:
651 elif shared:
637 sourcerequires = _readrequires(sharedvfs, False)
652 sourcerequires = _readrequires(sharedvfs, False)
638 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
653 if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
639 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
654 mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
640 mismatch_warn = ui.configbool(
655 mismatch_warn = ui.configbool(
641 b'share', b'safe-mismatch.source-safe.warn'
656 b'share', b'safe-mismatch.source-safe.warn'
642 )
657 )
643 if mismatch_config in (
658 if mismatch_config in (
644 b'upgrade-allow',
659 b'upgrade-allow',
645 b'allow',
660 b'allow',
646 b'upgrade-abort',
661 b'upgrade-abort',
647 ):
662 ):
648 # prevent cyclic import localrepo -> upgrade -> localrepo
663 # prevent cyclic import localrepo -> upgrade -> localrepo
649 from . import upgrade
664 from . import upgrade
650
665
651 upgrade.upgrade_share_to_safe(
666 upgrade.upgrade_share_to_safe(
652 ui,
667 ui,
653 hgvfs,
668 hgvfs,
654 storevfs,
669 storevfs,
655 requirements,
670 requirements,
656 mismatch_config,
671 mismatch_config,
657 mismatch_warn,
672 mismatch_warn,
658 )
673 )
659 elif mismatch_config == b'abort':
674 elif mismatch_config == b'abort':
660 raise error.Abort(
675 raise error.Abort(
661 _(
676 _(
662 b'version mismatch: source uses share-safe'
677 b'version mismatch: source uses share-safe'
663 b' functionality while the current share does not'
678 b' functionality while the current share does not'
664 ),
679 ),
665 hint=hint,
680 hint=hint,
666 )
681 )
667 else:
682 else:
668 raise error.Abort(
683 raise error.Abort(
669 _(
684 _(
670 b"share-safe mismatch with source.\nUnrecognized"
685 b"share-safe mismatch with source.\nUnrecognized"
671 b" value '%s' of `share.safe-mismatch.source-safe` set."
686 b" value '%s' of `share.safe-mismatch.source-safe` set."
672 )
687 )
673 % mismatch_config,
688 % mismatch_config,
674 hint=hint,
689 hint=hint,
675 )
690 )
676
691
677 # The .hg/hgrc file may load extensions or contain config options
692 # The .hg/hgrc file may load extensions or contain config options
678 # that influence repository construction. Attempt to load it and
693 # that influence repository construction. Attempt to load it and
679 # process any new extensions that it may have pulled in.
694 # process any new extensions that it may have pulled in.
680 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
695 if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
681 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
696 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
682 extensions.loadall(ui)
697 extensions.loadall(ui)
683 extensions.populateui(ui)
698 extensions.populateui(ui)
684
699
685 # Set of module names of extensions loaded for this repository.
700 # Set of module names of extensions loaded for this repository.
686 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
701 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
687
702
688 supportedrequirements = gathersupportedrequirements(ui)
703 supportedrequirements = gathersupportedrequirements(ui)
689
704
690 # We first validate the requirements are known.
705 # We first validate the requirements are known.
691 ensurerequirementsrecognized(requirements, supportedrequirements)
706 ensurerequirementsrecognized(requirements, supportedrequirements)
692
707
693 # Then we validate that the known set is reasonable to use together.
708 # Then we validate that the known set is reasonable to use together.
694 ensurerequirementscompatible(ui, requirements)
709 ensurerequirementscompatible(ui, requirements)
695
710
696 # TODO there are unhandled edge cases related to opening repositories with
711 # TODO there are unhandled edge cases related to opening repositories with
697 # shared storage. If storage is shared, we should also test for requirements
712 # shared storage. If storage is shared, we should also test for requirements
698 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
713 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
699 # that repo, as that repo may load extensions needed to open it. This is a
714 # that repo, as that repo may load extensions needed to open it. This is a
700 # bit complicated because we don't want the other hgrc to overwrite settings
715 # bit complicated because we don't want the other hgrc to overwrite settings
701 # in this hgrc.
716 # in this hgrc.
702 #
717 #
703 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
718 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
704 # file when sharing repos. But if a requirement is added after the share is
719 # file when sharing repos. But if a requirement is added after the share is
705 # performed, thereby introducing a new requirement for the opener, we may
720 # performed, thereby introducing a new requirement for the opener, we may
706 # will not see that and could encounter a run-time error interacting with
721 # will not see that and could encounter a run-time error interacting with
707 # that shared store since it has an unknown-to-us requirement.
722 # that shared store since it has an unknown-to-us requirement.
708
723
709 # At this point, we know we should be capable of opening the repository.
724 # At this point, we know we should be capable of opening the repository.
710 # Now get on with doing that.
725 # Now get on with doing that.
711
726
712 features = set()
727 features = set()
713
728
714 # The "store" part of the repository holds versioned data. How it is
729 # The "store" part of the repository holds versioned data. How it is
715 # accessed is determined by various requirements. If `shared` or
730 # accessed is determined by various requirements. If `shared` or
716 # `relshared` requirements are present, this indicates current repository
731 # `relshared` requirements are present, this indicates current repository
717 # is a share and store exists in path mentioned in `.hg/sharedpath`
732 # is a share and store exists in path mentioned in `.hg/sharedpath`
718 if shared:
733 if shared:
719 storebasepath = sharedvfs.base
734 storebasepath = sharedvfs.base
720 cachepath = sharedvfs.join(b'cache')
735 cachepath = sharedvfs.join(b'cache')
721 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
736 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
722 else:
737 else:
723 storebasepath = hgvfs.base
738 storebasepath = hgvfs.base
724 cachepath = hgvfs.join(b'cache')
739 cachepath = hgvfs.join(b'cache')
725 wcachepath = hgvfs.join(b'wcache')
740 wcachepath = hgvfs.join(b'wcache')
726
741
727 # The store has changed over time and the exact layout is dictated by
742 # The store has changed over time and the exact layout is dictated by
728 # requirements. The store interface abstracts differences across all
743 # requirements. The store interface abstracts differences across all
729 # of them.
744 # of them.
730 store = makestore(
745 store = makestore(
731 requirements,
746 requirements,
732 storebasepath,
747 storebasepath,
733 lambda base: vfsmod.vfs(base, cacheaudited=True),
748 lambda base: vfsmod.vfs(base, cacheaudited=True),
734 )
749 )
735 hgvfs.createmode = store.createmode
750 hgvfs.createmode = store.createmode
736
751
737 storevfs = store.vfs
752 storevfs = store.vfs
738 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
753 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
739
754
740 if (
755 if (
741 requirementsmod.REVLOGV2_REQUIREMENT in requirements
756 requirementsmod.REVLOGV2_REQUIREMENT in requirements
742 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
757 or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
743 ):
758 ):
744 features.add(repository.REPO_FEATURE_SIDE_DATA)
759 features.add(repository.REPO_FEATURE_SIDE_DATA)
745 # the revlogv2 docket introduced race condition that we need to fix
760 # the revlogv2 docket introduced race condition that we need to fix
746 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
761 features.discard(repository.REPO_FEATURE_STREAM_CLONE)
747
762
748 # The cache vfs is used to manage cache files.
763 # The cache vfs is used to manage cache files.
749 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
764 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
750 cachevfs.createmode = store.createmode
765 cachevfs.createmode = store.createmode
751 # The cache vfs is used to manage cache files related to the working copy
766 # The cache vfs is used to manage cache files related to the working copy
752 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
767 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
753 wcachevfs.createmode = store.createmode
768 wcachevfs.createmode = store.createmode
754
769
755 # Now resolve the type for the repository object. We do this by repeatedly
770 # Now resolve the type for the repository object. We do this by repeatedly
756 # calling a factory function to produces types for specific aspects of the
771 # calling a factory function to produces types for specific aspects of the
757 # repo's operation. The aggregate returned types are used as base classes
772 # repo's operation. The aggregate returned types are used as base classes
758 # for a dynamically-derived type, which will represent our new repository.
773 # for a dynamically-derived type, which will represent our new repository.
759
774
760 bases = []
775 bases = []
761 extrastate = {}
776 extrastate = {}
762
777
763 for iface, fn in REPO_INTERFACES:
778 for iface, fn in REPO_INTERFACES:
764 # We pass all potentially useful state to give extensions tons of
779 # We pass all potentially useful state to give extensions tons of
765 # flexibility.
780 # flexibility.
766 typ = fn()(
781 typ = fn()(
767 ui=ui,
782 ui=ui,
768 intents=intents,
783 intents=intents,
769 requirements=requirements,
784 requirements=requirements,
770 features=features,
785 features=features,
771 wdirvfs=wdirvfs,
786 wdirvfs=wdirvfs,
772 hgvfs=hgvfs,
787 hgvfs=hgvfs,
773 store=store,
788 store=store,
774 storevfs=storevfs,
789 storevfs=storevfs,
775 storeoptions=storevfs.options,
790 storeoptions=storevfs.options,
776 cachevfs=cachevfs,
791 cachevfs=cachevfs,
777 wcachevfs=wcachevfs,
792 wcachevfs=wcachevfs,
778 extensionmodulenames=extensionmodulenames,
793 extensionmodulenames=extensionmodulenames,
779 extrastate=extrastate,
794 extrastate=extrastate,
780 baseclasses=bases,
795 baseclasses=bases,
781 )
796 )
782
797
783 if not isinstance(typ, type):
798 if not isinstance(typ, type):
784 raise error.ProgrammingError(
799 raise error.ProgrammingError(
785 b'unable to construct type for %s' % iface
800 b'unable to construct type for %s' % iface
786 )
801 )
787
802
788 bases.append(typ)
803 bases.append(typ)
789
804
790 # type() allows you to use characters in type names that wouldn't be
805 # type() allows you to use characters in type names that wouldn't be
791 # recognized as Python symbols in source code. We abuse that to add
806 # recognized as Python symbols in source code. We abuse that to add
792 # rich information about our constructed repo.
807 # rich information about our constructed repo.
793 name = pycompat.sysstr(
808 name = pycompat.sysstr(
794 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
809 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
795 )
810 )
796
811
797 cls = type(name, tuple(bases), {})
812 cls = type(name, tuple(bases), {})
798
813
799 return cls(
814 return cls(
800 baseui=baseui,
815 baseui=baseui,
801 ui=ui,
816 ui=ui,
802 origroot=path,
817 origroot=path,
803 wdirvfs=wdirvfs,
818 wdirvfs=wdirvfs,
804 hgvfs=hgvfs,
819 hgvfs=hgvfs,
805 requirements=requirements,
820 requirements=requirements,
806 supportedrequirements=supportedrequirements,
821 supportedrequirements=supportedrequirements,
807 sharedpath=storebasepath,
822 sharedpath=storebasepath,
808 store=store,
823 store=store,
809 cachevfs=cachevfs,
824 cachevfs=cachevfs,
810 wcachevfs=wcachevfs,
825 wcachevfs=wcachevfs,
811 features=features,
826 features=features,
812 intents=intents,
827 intents=intents,
813 )
828 )
814
829
815
830
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is a vfs object pointing to the share source repo when the
    current repository is a shared one.
    """
    if not rcutil.use_repo_hgrc():
        return False

    # Candidate (path, root) pairs, in the order they must be applied.
    # Config from the share source is read first so that the local
    # repository's own config can override it.
    candidates = []
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        candidates.append((sharedvfs.join(b'hgrc'), sharedvfs.base))
    candidates.append((hgvfs.join(b'hgrc'), wdirvfs.base))
    candidates.append((hgvfs.join(b'hgrc-not-shared'), wdirvfs.base))

    loaded = False
    for cfgpath, cfgroot in candidates:
        try:
            ui.readconfig(cfgpath, root=cfgroot)
            loaded = True
        except IOError:
            # A missing config file is not an error.
            pass

    return loaded
856
871
857
872
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Extensions to enable automatically when the matching requirement is
    # present in the repository.
    auto_by_requirement = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement in sorted(auto_by_requirement):
        if requirement in requirements:
            for extname in auto_by_requirement[requirement]:
                # Respect any explicit user configuration: only auto-enable
                # an extension the user has not configured themselves.
                if not ui.hasconfig(b'extensions', extname):
                    ui.setconfig(
                        b'extensions', extname, b'', source=b'autoload'
                    )
883
898
884
899
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Seed with every requirement this module itself knows about.
    supported = set(localrepository._basesupported)

    if dirstate.SUPPORTS_DIRSTATE_V2:
        supported.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # Let extension-registered ``featuresetupfuncs`` amend the set, but
    # only when the registering extension is loaded for this ui instance.
    loadedmodules = {mod.__name__ for _ign, mod in extensions.extensions(ui)}
    for setupfn in featuresetupfuncs:
        if setupfn.__module__ in loadedmodules:
            setupfn(ui, supported)

    # Requirements derived from the registered compression engines.
    for enginename in util.compengines:
        engine = util.compengines[enginename]
        if not (engine.available() and engine.revlogheader()):
            continue
        supported.add(b'exp-compression-%s' % enginename)
        if engine.name() == b'zstd':
            supported.add(b'revlog-compression-zstd')

    return supported
910
925
911
926
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError``
    if there exists any requirement in that set that currently loaded
    code doesn't recognize, and on signs of a corrupted requires file.
    """
    unknown = {req for req in requirements if req not in supported}

    # An unknown requirement that is empty or does not start with an
    # alphanumeric character indicates a corrupted requires file rather
    # than a merely missing feature.
    for req in unknown:
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(unknown)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )
941
956
942
957
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    needs_sparse = requirementsmod.SPARSE_REQUIREMENT in requirements
    # A sparse repo is unusable unless the sparse extension is enabled.
    if needs_sparse and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )
967
982
968
983
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Repositories without the "store" requirement use the legacy flat
    # layout.
    if requirementsmod.STORE_REQUIREMENT not in requirements:
        return storemod.basicstore(path, vfstype)

    # "fncache" stores keep a cache of filenames, optionally with
    # dot-encoding of problematic path components.
    if requirementsmod.FNCACHE_REQUIREMENT in requirements:
        use_dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
        return storemod.fncachestore(path, vfstype, use_dotencode)

    return storemod.encodedstore(path, vfstype)
979
994
980
995
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    opts = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        opts[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifest_cache_size = ui.configint(b'format', b'manifestcachesize')
    if manifest_cache_size is not None:
        opts[b'manifestcachesize'] = manifest_cache_size

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    has_revlog = (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    )
    if has_revlog:
        opts.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:
        # Explicitly mark the repo as using revlogv0.
        opts[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        opts[b'copies-storage'] = b'changeset-sidedata'
    elif ui.config(b'experimental', b'copies.write-to') in (
        b'changeset-only',
        b'compatibility',
    ):
        opts[b'copies-storage'] = b'extra'

    return opts
1018
1033
1019
1034
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Translates repository ``requirements`` and ``ui`` configuration into the
    dict of vfs opener options consumed by revlog construction.
    """

    options = {b'flagprocessors': {}}

    # Revlog format requirements map directly onto opener flags.
    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    options[b'deltabothparents'] = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )

    options[b'issue6528.fix-incoming'] = ui.configbool(
        b'storage', b'revlog.issue6528.fix-incoming'
    )

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        # unset config entry: fall back to the general-delta heuristic
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    # sparse-read tuning knobs
    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # sparse-revlog implies general delta
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirement to co-exist because
        # strickly speaking, revlog seems to support mixed compression style.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            # unknown value: warn and fall back to the documented default
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            # no C/Rust fast path available: honour the slow-path policy
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        if ui.configbool(b'devel', b'persistent-nodemap'):
            options[b'devel-force-nodemap'] = True

    return options
1156
1171
1157
1172
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    Currently always returns :class:`localrepository`; ``kwargs`` are
    accepted for interface compatibility and ignored.
    """
    return localrepository
1161
1176
1162
1177
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Return the filelog for tracked file ``path`` (leading '/' ignored)."""
        if path[:1] == b'/':
            # normalize away a single leading slash
            path = path[1:]

        return filelog.filelog(self.svfs, path)
1172
1187
1173
1188
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Return the narrow filelog for ``path`` (leading '/' ignored)."""
        if path[:1] == b'/':
            # normalize away a single leading slash
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1183
1198
1184
1199
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``.

    Side effect: advertises revlog file storage and stream-clone support
    in ``features``.
    """
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    # narrow repositories need the matcher-aware file storage
    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    return revlogfilestorage
1194
1209
1195
1210
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
1205
1220
1206
1221
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.

    # Requirements that affect the on-disk store format.
    supportedformats = {
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
    }
    # Full set of requirements this class knows how to open.
    _basesupported = supportedformats | {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We migh consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicatged business someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remainig bit and drop this line
        b'bisect.state',
    }
1269
1284
1270 def __init__(
1285 def __init__(
1271 self,
1286 self,
1272 baseui,
1287 baseui,
1273 ui,
1288 ui,
1274 origroot,
1289 origroot,
1275 wdirvfs,
1290 wdirvfs,
1276 hgvfs,
1291 hgvfs,
1277 requirements,
1292 requirements,
1278 supportedrequirements,
1293 supportedrequirements,
1279 sharedpath,
1294 sharedpath,
1280 store,
1295 store,
1281 cachevfs,
1296 cachevfs,
1282 wcachevfs,
1297 wcachevfs,
1283 features,
1298 features,
1284 intents=None,
1299 intents=None,
1285 ):
1300 ):
1286 """Create a new local repository instance.
1301 """Create a new local repository instance.
1287
1302
1288 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1303 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1289 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1304 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1290 object.
1305 object.
1291
1306
1292 Arguments:
1307 Arguments:
1293
1308
1294 baseui
1309 baseui
1295 ``ui.ui`` instance that ``ui`` argument was based off of.
1310 ``ui.ui`` instance that ``ui`` argument was based off of.
1296
1311
1297 ui
1312 ui
1298 ``ui.ui`` instance for use by the repository.
1313 ``ui.ui`` instance for use by the repository.
1299
1314
1300 origroot
1315 origroot
1301 ``bytes`` path to working directory root of this repository.
1316 ``bytes`` path to working directory root of this repository.
1302
1317
1303 wdirvfs
1318 wdirvfs
1304 ``vfs.vfs`` rooted at the working directory.
1319 ``vfs.vfs`` rooted at the working directory.
1305
1320
1306 hgvfs
1321 hgvfs
1307 ``vfs.vfs`` rooted at .hg/
1322 ``vfs.vfs`` rooted at .hg/
1308
1323
1309 requirements
1324 requirements
1310 ``set`` of bytestrings representing repository opening requirements.
1325 ``set`` of bytestrings representing repository opening requirements.
1311
1326
1312 supportedrequirements
1327 supportedrequirements
1313 ``set`` of bytestrings representing repository requirements that we
1328 ``set`` of bytestrings representing repository requirements that we
1314 know how to open. May be a supetset of ``requirements``.
1329 know how to open. May be a supetset of ``requirements``.
1315
1330
1316 sharedpath
1331 sharedpath
1317 ``bytes`` Defining path to storage base directory. Points to a
1332 ``bytes`` Defining path to storage base directory. Points to a
1318 ``.hg/`` directory somewhere.
1333 ``.hg/`` directory somewhere.
1319
1334
1320 store
1335 store
1321 ``store.basicstore`` (or derived) instance providing access to
1336 ``store.basicstore`` (or derived) instance providing access to
1322 versioned storage.
1337 versioned storage.
1323
1338
1324 cachevfs
1339 cachevfs
1325 ``vfs.vfs`` used for cache files.
1340 ``vfs.vfs`` used for cache files.
1326
1341
1327 wcachevfs
1342 wcachevfs
1328 ``vfs.vfs`` used for cache files related to the working copy.
1343 ``vfs.vfs`` used for cache files related to the working copy.
1329
1344
1330 features
1345 features
1331 ``set`` of bytestrings defining features/capabilities of this
1346 ``set`` of bytestrings defining features/capabilities of this
1332 instance.
1347 instance.
1333
1348
1334 intents
1349 intents
1335 ``set`` of system strings indicating what this repo will be used
1350 ``set`` of system strings indicating what this repo will be used
1336 for.
1351 for.
1337 """
1352 """
1338 self.baseui = baseui
1353 self.baseui = baseui
1339 self.ui = ui
1354 self.ui = ui
1340 self.origroot = origroot
1355 self.origroot = origroot
1341 # vfs rooted at working directory.
1356 # vfs rooted at working directory.
1342 self.wvfs = wdirvfs
1357 self.wvfs = wdirvfs
1343 self.root = wdirvfs.base
1358 self.root = wdirvfs.base
1344 # vfs rooted at .hg/. Used to access most non-store paths.
1359 # vfs rooted at .hg/. Used to access most non-store paths.
1345 self.vfs = hgvfs
1360 self.vfs = hgvfs
1346 self.path = hgvfs.base
1361 self.path = hgvfs.base
1347 self.requirements = requirements
1362 self.requirements = requirements
1348 self.nodeconstants = sha1nodeconstants
1363 self.nodeconstants = sha1nodeconstants
1349 self.nullid = self.nodeconstants.nullid
1364 self.nullid = self.nodeconstants.nullid
1350 self.supported = supportedrequirements
1365 self.supported = supportedrequirements
1351 self.sharedpath = sharedpath
1366 self.sharedpath = sharedpath
1352 self.store = store
1367 self.store = store
1353 self.cachevfs = cachevfs
1368 self.cachevfs = cachevfs
1354 self.wcachevfs = wcachevfs
1369 self.wcachevfs = wcachevfs
1355 self.features = features
1370 self.features = features
1356
1371
1357 self.filtername = None
1372 self.filtername = None
1358
1373
1359 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1374 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1360 b'devel', b'check-locks'
1375 b'devel', b'check-locks'
1361 ):
1376 ):
1362 self.vfs.audit = self._getvfsward(self.vfs.audit)
1377 self.vfs.audit = self._getvfsward(self.vfs.audit)
1363 # A list of callback to shape the phase if no data were found.
1378 # A list of callback to shape the phase if no data were found.
1364 # Callback are in the form: func(repo, roots) --> processed root.
1379 # Callback are in the form: func(repo, roots) --> processed root.
1365 # This list it to be filled by extension during repo setup
1380 # This list it to be filled by extension during repo setup
1366 self._phasedefaults = []
1381 self._phasedefaults = []
1367
1382
1368 color.setup(self.ui)
1383 color.setup(self.ui)
1369
1384
1370 self.spath = self.store.path
1385 self.spath = self.store.path
1371 self.svfs = self.store.vfs
1386 self.svfs = self.store.vfs
1372 self.sjoin = self.store.join
1387 self.sjoin = self.store.join
1373 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1388 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1374 b'devel', b'check-locks'
1389 b'devel', b'check-locks'
1375 ):
1390 ):
1376 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1391 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1377 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1392 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1378 else: # standard vfs
1393 else: # standard vfs
1379 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1394 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1380
1395
1381 self._dirstatevalidatewarned = False
1396 self._dirstatevalidatewarned = False
1382
1397
1383 self._branchcaches = branchmap.BranchMapCache()
1398 self._branchcaches = branchmap.BranchMapCache()
1384 self._revbranchcache = None
1399 self._revbranchcache = None
1385 self._filterpats = {}
1400 self._filterpats = {}
1386 self._datafilters = {}
1401 self._datafilters = {}
1387 self._transref = self._lockref = self._wlockref = None
1402 self._transref = self._lockref = self._wlockref = None
1388
1403
1389 # A cache for various files under .hg/ that tracks file changes,
1404 # A cache for various files under .hg/ that tracks file changes,
1390 # (used by the filecache decorator)
1405 # (used by the filecache decorator)
1391 #
1406 #
1392 # Maps a property name to its util.filecacheentry
1407 # Maps a property name to its util.filecacheentry
1393 self._filecache = {}
1408 self._filecache = {}
1394
1409
1395 # hold sets of revision to be filtered
1410 # hold sets of revision to be filtered
1396 # should be cleared when something might have changed the filter value:
1411 # should be cleared when something might have changed the filter value:
1397 # - new changesets,
1412 # - new changesets,
1398 # - phase change,
1413 # - phase change,
1399 # - new obsolescence marker,
1414 # - new obsolescence marker,
1400 # - working directory parent change,
1415 # - working directory parent change,
1401 # - bookmark changes
1416 # - bookmark changes
1402 self.filteredrevcache = {}
1417 self.filteredrevcache = {}
1403
1418
1404 # post-dirstate-status hooks
1419 # post-dirstate-status hooks
1405 self._postdsstatus = []
1420 self._postdsstatus = []
1406
1421
1407 # generic mapping between names and nodes
1422 # generic mapping between names and nodes
1408 self.names = namespaces.namespaces()
1423 self.names = namespaces.namespaces()
1409
1424
1410 # Key to signature value.
1425 # Key to signature value.
1411 self._sparsesignaturecache = {}
1426 self._sparsesignaturecache = {}
1412 # Signature to cached matcher instance.
1427 # Signature to cached matcher instance.
1413 self._sparsematchercache = {}
1428 self._sparsematchercache = {}
1414
1429
1415 self._extrafilterid = repoview.extrafilter(ui)
1430 self._extrafilterid = repoview.extrafilter(ui)
1416
1431
1417 self.filecopiesmode = None
1432 self.filecopiesmode = None
1418 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1433 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1419 self.filecopiesmode = b'changeset-sidedata'
1434 self.filecopiesmode = b'changeset-sidedata'
1420
1435
1421 self._wanted_sidedata = set()
1436 self._wanted_sidedata = set()
1422 self._sidedata_computers = {}
1437 self._sidedata_computers = {}
1423 sidedatamod.set_sidedata_spec_for_repo(self)
1438 sidedatamod.set_sidedata_spec_for_repo(self)
1424
1439
1425 def _getvfsward(self, origfunc):
1440 def _getvfsward(self, origfunc):
1426 """build a ward for self.vfs"""
1441 """build a ward for self.vfs"""
1427 rref = weakref.ref(self)
1442 rref = weakref.ref(self)
1428
1443
1429 def checkvfs(path, mode=None):
1444 def checkvfs(path, mode=None):
1430 ret = origfunc(path, mode=mode)
1445 ret = origfunc(path, mode=mode)
1431 repo = rref()
1446 repo = rref()
1432 if (
1447 if (
1433 repo is None
1448 repo is None
1434 or not util.safehasattr(repo, b'_wlockref')
1449 or not util.safehasattr(repo, b'_wlockref')
1435 or not util.safehasattr(repo, b'_lockref')
1450 or not util.safehasattr(repo, b'_lockref')
1436 ):
1451 ):
1437 return
1452 return
1438 if mode in (None, b'r', b'rb'):
1453 if mode in (None, b'r', b'rb'):
1439 return
1454 return
1440 if path.startswith(repo.path):
1455 if path.startswith(repo.path):
1441 # truncate name relative to the repository (.hg)
1456 # truncate name relative to the repository (.hg)
1442 path = path[len(repo.path) + 1 :]
1457 path = path[len(repo.path) + 1 :]
1443 if path.startswith(b'cache/'):
1458 if path.startswith(b'cache/'):
1444 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1459 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1445 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1460 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1446 # path prefixes covered by 'lock'
1461 # path prefixes covered by 'lock'
1447 vfs_path_prefixes = (
1462 vfs_path_prefixes = (
1448 b'journal.',
1463 b'journal.',
1449 b'undo.',
1464 b'undo.',
1450 b'strip-backup/',
1465 b'strip-backup/',
1451 b'cache/',
1466 b'cache/',
1452 )
1467 )
1453 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1468 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1454 if repo._currentlock(repo._lockref) is None:
1469 if repo._currentlock(repo._lockref) is None:
1455 repo.ui.develwarn(
1470 repo.ui.develwarn(
1456 b'write with no lock: "%s"' % path,
1471 b'write with no lock: "%s"' % path,
1457 stacklevel=3,
1472 stacklevel=3,
1458 config=b'check-locks',
1473 config=b'check-locks',
1459 )
1474 )
1460 elif repo._currentlock(repo._wlockref) is None:
1475 elif repo._currentlock(repo._wlockref) is None:
1461 # rest of vfs files are covered by 'wlock'
1476 # rest of vfs files are covered by 'wlock'
1462 #
1477 #
1463 # exclude special files
1478 # exclude special files
1464 for prefix in self._wlockfreeprefix:
1479 for prefix in self._wlockfreeprefix:
1465 if path.startswith(prefix):
1480 if path.startswith(prefix):
1466 return
1481 return
1467 repo.ui.develwarn(
1482 repo.ui.develwarn(
1468 b'write with no wlock: "%s"' % path,
1483 b'write with no wlock: "%s"' % path,
1469 stacklevel=3,
1484 stacklevel=3,
1470 config=b'check-locks',
1485 config=b'check-locks',
1471 )
1486 )
1472 return ret
1487 return ret
1473
1488
1474 return checkvfs
1489 return checkvfs
1475
1490
1476 def _getsvfsward(self, origfunc):
1491 def _getsvfsward(self, origfunc):
1477 """build a ward for self.svfs"""
1492 """build a ward for self.svfs"""
1478 rref = weakref.ref(self)
1493 rref = weakref.ref(self)
1479
1494
1480 def checksvfs(path, mode=None):
1495 def checksvfs(path, mode=None):
1481 ret = origfunc(path, mode=mode)
1496 ret = origfunc(path, mode=mode)
1482 repo = rref()
1497 repo = rref()
1483 if repo is None or not util.safehasattr(repo, b'_lockref'):
1498 if repo is None or not util.safehasattr(repo, b'_lockref'):
1484 return
1499 return
1485 if mode in (None, b'r', b'rb'):
1500 if mode in (None, b'r', b'rb'):
1486 return
1501 return
1487 if path.startswith(repo.sharedpath):
1502 if path.startswith(repo.sharedpath):
1488 # truncate name relative to the repository (.hg)
1503 # truncate name relative to the repository (.hg)
1489 path = path[len(repo.sharedpath) + 1 :]
1504 path = path[len(repo.sharedpath) + 1 :]
1490 if repo._currentlock(repo._lockref) is None:
1505 if repo._currentlock(repo._lockref) is None:
1491 repo.ui.develwarn(
1506 repo.ui.develwarn(
1492 b'write with no lock: "%s"' % path, stacklevel=4
1507 b'write with no lock: "%s"' % path, stacklevel=4
1493 )
1508 )
1494 return ret
1509 return ret
1495
1510
1496 return checksvfs
1511 return checksvfs
1497
1512
1498 def close(self):
1513 def close(self):
1499 self._writecaches()
1514 self._writecaches()
1500
1515
1501 def _writecaches(self):
1516 def _writecaches(self):
1502 if self._revbranchcache:
1517 if self._revbranchcache:
1503 self._revbranchcache.write()
1518 self._revbranchcache.write()
1504
1519
1505 def _restrictcapabilities(self, caps):
1520 def _restrictcapabilities(self, caps):
1506 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1521 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1507 caps = set(caps)
1522 caps = set(caps)
1508 capsblob = bundle2.encodecaps(
1523 capsblob = bundle2.encodecaps(
1509 bundle2.getrepocaps(self, role=b'client')
1524 bundle2.getrepocaps(self, role=b'client')
1510 )
1525 )
1511 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1526 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1512 if self.ui.configbool(b'experimental', b'narrow'):
1527 if self.ui.configbool(b'experimental', b'narrow'):
1513 caps.add(wireprototypes.NARROWCAP)
1528 caps.add(wireprototypes.NARROWCAP)
1514 return caps
1529 return caps
1515
1530
1516 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1531 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1517 # self -> auditor -> self._checknested -> self
1532 # self -> auditor -> self._checknested -> self
1518
1533
1519 @property
1534 @property
1520 def auditor(self):
1535 def auditor(self):
1521 # This is only used by context.workingctx.match in order to
1536 # This is only used by context.workingctx.match in order to
1522 # detect files in subrepos.
1537 # detect files in subrepos.
1523 return pathutil.pathauditor(self.root, callback=self._checknested)
1538 return pathutil.pathauditor(self.root, callback=self._checknested)
1524
1539
1525 @property
1540 @property
1526 def nofsauditor(self):
1541 def nofsauditor(self):
1527 # This is only used by context.basectx.match in order to detect
1542 # This is only used by context.basectx.match in order to detect
1528 # files in subrepos.
1543 # files in subrepos.
1529 return pathutil.pathauditor(
1544 return pathutil.pathauditor(
1530 self.root, callback=self._checknested, realfs=False, cached=True
1545 self.root, callback=self._checknested, realfs=False, cached=True
1531 )
1546 )
1532
1547
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        # path of the candidate relative to the repository root
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        # walk from the deepest candidate prefix upward, looking for the
        # enclosing subrepo (if any) in the working context's substate
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path is itself a registered subrepo
                    return True
                else:
                    # the path lives inside a subrepo; delegate the check
                    # to that subrepo for the remaining suffix
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False
1570
1585
1571 def peer(self):
1586 def peer(self):
1572 return localpeer(self) # not cached to avoid reference cycle
1587 return localpeer(self) # not cached to avoid reference cycle
1573
1588
1574 def unfiltered(self):
1589 def unfiltered(self):
1575 """Return unfiltered version of the repository
1590 """Return unfiltered version of the repository
1576
1591
1577 Intended to be overwritten by filtered repo."""
1592 Intended to be overwritten by filtered repo."""
1578 return self
1593 return self
1579
1594
1580 def filtered(self, name, visibilityexceptions=None):
1595 def filtered(self, name, visibilityexceptions=None):
1581 """Return a filtered version of a repository
1596 """Return a filtered version of a repository
1582
1597
1583 The `name` parameter is the identifier of the requested view. This
1598 The `name` parameter is the identifier of the requested view. This
1584 will return a repoview object set "exactly" to the specified view.
1599 will return a repoview object set "exactly" to the specified view.
1585
1600
1586 This function does not apply recursive filtering to a repository. For
1601 This function does not apply recursive filtering to a repository. For
1587 example calling `repo.filtered("served")` will return a repoview using
1602 example calling `repo.filtered("served")` will return a repoview using
1588 the "served" view, regardless of the initial view used by `repo`.
1603 the "served" view, regardless of the initial view used by `repo`.
1589
1604
1590 In other word, there is always only one level of `repoview` "filtering".
1605 In other word, there is always only one level of `repoview` "filtering".
1591 """
1606 """
1592 if self._extrafilterid is not None and b'%' not in name:
1607 if self._extrafilterid is not None and b'%' not in name:
1593 name = name + b'%' + self._extrafilterid
1608 name = name + b'%' + self._extrafilterid
1594
1609
1595 cls = repoview.newtype(self.unfiltered().__class__)
1610 cls = repoview.newtype(self.unfiltered().__class__)
1596 return cls(self, name, visibilityexceptions)
1611 return cls(self, name, visibilityexceptions)
1597
1612
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks pointing to the
        # "unknown" nodes will be dropped for good.  However, writes happen
        # within locks.  This locking makes it possible to have a race free
        # consistent read.  For this purpose, data read from disc before
        # locking is "invalidated" right after the locks are taken.  These
        # invalidations are "light": the `filecache` mechanism keeps the data
        # in memory and will reuse it if the underlying files did not change.
        # Not parsing the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by
        # the bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data.  So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not enough,
        # because at the time we build the content for `_bookmarks` in (4),
        # the changelog file has already diverged from the content used for
        # loading `changelog` in (1).
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`.  The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1).  As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1655
1670
1656 def _refreshchangelog(self):
1671 def _refreshchangelog(self):
1657 """make sure the in memory changelog match the on-disk one"""
1672 """make sure the in memory changelog match the on-disk one"""
1658 if 'changelog' in vars(self) and self.currenttransaction() is None:
1673 if 'changelog' in vars(self) and self.currenttransaction() is None:
1659 del self.changelog
1674 del self.changelog
1660
1675
1661 @property
1676 @property
1662 def _activebookmark(self):
1677 def _activebookmark(self):
1663 return self._bookmarks.active
1678 return self._bookmarks.active
1664
1679
1665 # _phasesets depend on changelog. what we need is to call
1680 # _phasesets depend on changelog. what we need is to call
1666 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1681 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1667 # can't be easily expressed in filecache mechanism.
1682 # can't be easily expressed in filecache mechanism.
1668 @storecache(b'phaseroots', b'00changelog.i')
1683 @storecache(b'phaseroots', b'00changelog.i')
1669 def _phasecache(self):
1684 def _phasecache(self):
1670 return phases.phasecache(self, self._phasedefaults)
1685 return phases.phasecache(self, self._phasedefaults)
1671
1686
1672 @storecache(b'obsstore')
1687 @storecache(b'obsstore')
1673 def obsstore(self):
1688 def obsstore(self):
1674 return obsolete.makestore(self.ui, self)
1689 return obsolete.makestore(self.ui, self)
1675
1690
    # NOTE(review): per the commit this hunk belongs to, changelogcache()
    # also monitors `00changelog.n` when applicable (issue6554), which a
    # plain @storecache(b'00changelog.i') did not.
    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid race see issue6303
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )
1684
1699
1685 @storecache(b'00manifest.i')
1700 @storecache(b'00manifest.i')
1686 def manifestlog(self):
1701 def manifestlog(self):
1687 return self.store.manifestlog(self, self._storenarrowmatch)
1702 return self.store.manifestlog(self, self._storenarrowmatch)
1688
1703
1689 @repofilecache(b'dirstate')
1704 @repofilecache(b'dirstate')
1690 def dirstate(self):
1705 def dirstate(self):
1691 return self._makedirstate()
1706 return self._makedirstate()
1692
1707
1693 def _makedirstate(self):
1708 def _makedirstate(self):
1694 """Extension point for wrapping the dirstate per-repo."""
1709 """Extension point for wrapping the dirstate per-repo."""
1695 sparsematchfn = lambda: sparse.matcher(self)
1710 sparsematchfn = lambda: sparse.matcher(self)
1696 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1711 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1697 use_dirstate_v2 = v2_req in self.requirements
1712 use_dirstate_v2 = v2_req in self.requirements
1698
1713
1699 return dirstate.dirstate(
1714 return dirstate.dirstate(
1700 self.vfs,
1715 self.vfs,
1701 self.ui,
1716 self.ui,
1702 self.root,
1717 self.root,
1703 self._dirstatevalidate,
1718 self._dirstatevalidate,
1704 sparsematchfn,
1719 sparsematchfn,
1705 self.nodeconstants,
1720 self.nodeconstants,
1706 use_dirstate_v2,
1721 use_dirstate_v2,
1707 )
1722 )
1708
1723
1709 def _dirstatevalidate(self, node):
1724 def _dirstatevalidate(self, node):
1710 try:
1725 try:
1711 self.changelog.rev(node)
1726 self.changelog.rev(node)
1712 return node
1727 return node
1713 except error.LookupError:
1728 except error.LookupError:
1714 if not self._dirstatevalidatewarned:
1729 if not self._dirstatevalidatewarned:
1715 self._dirstatevalidatewarned = True
1730 self._dirstatevalidatewarned = True
1716 self.ui.warn(
1731 self.ui.warn(
1717 _(b"warning: ignoring unknown working parent %s!\n")
1732 _(b"warning: ignoring unknown working parent %s!\n")
1718 % short(node)
1733 % short(node)
1719 )
1734 )
1720 return self.nullid
1735 return self.nullid
1721
1736
1722 @storecache(narrowspec.FILENAME)
1737 @storecache(narrowspec.FILENAME)
1723 def narrowpats(self):
1738 def narrowpats(self):
1724 """matcher patterns for this repository's narrowspec
1739 """matcher patterns for this repository's narrowspec
1725
1740
1726 A tuple of (includes, excludes).
1741 A tuple of (includes, excludes).
1727 """
1742 """
1728 return narrowspec.load(self)
1743 return narrowspec.load(self)
1729
1744
1730 @storecache(narrowspec.FILENAME)
1745 @storecache(narrowspec.FILENAME)
1731 def _storenarrowmatch(self):
1746 def _storenarrowmatch(self):
1732 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1747 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1733 return matchmod.always()
1748 return matchmod.always()
1734 include, exclude = self.narrowpats
1749 include, exclude = self.narrowpats
1735 return narrowspec.match(self.root, include=include, exclude=exclude)
1750 return narrowspec.match(self.root, include=include, exclude=exclude)
1736
1751
1737 @storecache(narrowspec.FILENAME)
1752 @storecache(narrowspec.FILENAME)
1738 def _narrowmatch(self):
1753 def _narrowmatch(self):
1739 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1754 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1740 return matchmod.always()
1755 return matchmod.always()
1741 narrowspec.checkworkingcopynarrowspec(self)
1756 narrowspec.checkworkingcopynarrowspec(self)
1742 include, exclude = self.narrowpats
1757 include, exclude = self.narrowpats
1743 return narrowspec.match(self.root, include=include, exclude=exclude)
1758 return narrowspec.match(self.root, include=include, exclude=exclude)
1744
1759
1745 def narrowmatch(self, match=None, includeexact=False):
1760 def narrowmatch(self, match=None, includeexact=False):
1746 """matcher corresponding the the repo's narrowspec
1761 """matcher corresponding the the repo's narrowspec
1747
1762
1748 If `match` is given, then that will be intersected with the narrow
1763 If `match` is given, then that will be intersected with the narrow
1749 matcher.
1764 matcher.
1750
1765
1751 If `includeexact` is True, then any exact matches from `match` will
1766 If `includeexact` is True, then any exact matches from `match` will
1752 be included even if they're outside the narrowspec.
1767 be included even if they're outside the narrowspec.
1753 """
1768 """
1754 if match:
1769 if match:
1755 if includeexact and not self._narrowmatch.always():
1770 if includeexact and not self._narrowmatch.always():
1756 # do not exclude explicitly-specified paths so that they can
1771 # do not exclude explicitly-specified paths so that they can
1757 # be warned later on
1772 # be warned later on
1758 em = matchmod.exact(match.files())
1773 em = matchmod.exact(match.files())
1759 nm = matchmod.unionmatcher([self._narrowmatch, em])
1774 nm = matchmod.unionmatcher([self._narrowmatch, em])
1760 return matchmod.intersectmatchers(match, nm)
1775 return matchmod.intersectmatchers(match, nm)
1761 return matchmod.intersectmatchers(match, self._narrowmatch)
1776 return matchmod.intersectmatchers(match, self._narrowmatch)
1762 return self._narrowmatch
1777 return self._narrowmatch
1763
1778
1764 def setnarrowpats(self, newincludes, newexcludes):
1779 def setnarrowpats(self, newincludes, newexcludes):
1765 narrowspec.save(self, newincludes, newexcludes)
1780 narrowspec.save(self, newincludes, newexcludes)
1766 self.invalidate(clearfilecache=True)
1781 self.invalidate(clearfilecache=True)
1767
1782
1768 @unfilteredpropertycache
1783 @unfilteredpropertycache
1769 def _quick_access_changeid_null(self):
1784 def _quick_access_changeid_null(self):
1770 return {
1785 return {
1771 b'null': (nullrev, self.nodeconstants.nullid),
1786 b'null': (nullrev, self.nodeconstants.nullid),
1772 nullrev: (nullrev, self.nodeconstants.nullid),
1787 nullrev: (nullrev, self.nodeconstants.nullid),
1773 self.nullid: (nullrev, self.nullid),
1788 self.nullid: (nullrev, self.nullid),
1774 }
1789 }
1775
1790
    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filter that ensure wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        # b'.' resolves to the first working-copy parent when it is set
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick
1806
1821
1807 @unfilteredmethod
1822 @unfilteredmethod
1808 def _quick_access_changeid_invalidate(self):
1823 def _quick_access_changeid_invalidate(self):
1809 if '_quick_access_changeid_wc' in vars(self):
1824 if '_quick_access_changeid_wc' in vars(self):
1810 del self.__dict__['_quick_access_changeid_wc']
1825 del self.__dict__['_quick_access_changeid_wc']
1811
1826
1812 @property
1827 @property
1813 def _quick_access_changeid(self):
1828 def _quick_access_changeid(self):
1814 """an helper dictionnary for __getitem__ calls
1829 """an helper dictionnary for __getitem__ calls
1815
1830
1816 This contains a list of symbol we can recognise right away without
1831 This contains a list of symbol we can recognise right away without
1817 further processing.
1832 further processing.
1818 """
1833 """
1819 if self.filtername in repoview.filter_has_wc:
1834 if self.filtername in repoview.filter_has_wc:
1820 return self._quick_access_changeid_wc
1835 return self._quick_access_changeid_wc
1821 return self._quick_access_changeid_null
1836 return self._quick_access_changeid_null
1822
1837
    def __getitem__(self, changeid):
        """Return the context for ``changeid``.

        ``changeid`` may be None (working context), an existing context, a
        slice of revisions, a revision number, a binary or hex node, or one
        of the symbolic names handled below.
        """
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                # binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                # hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
1901
1916
1902 def __contains__(self, changeid):
1917 def __contains__(self, changeid):
1903 """True if the given changeid exists"""
1918 """True if the given changeid exists"""
1904 try:
1919 try:
1905 self[changeid]
1920 self[changeid]
1906 return True
1921 return True
1907 except error.RepoLookupError:
1922 except error.RepoLookupError:
1908 return False
1923 return False
1909
1924
1910 def __nonzero__(self):
1925 def __nonzero__(self):
1911 return True
1926 return True
1912
1927
1913 __bool__ = __nonzero__
1928 __bool__ = __nonzero__
1914
1929
1915 def __len__(self):
1930 def __len__(self):
1916 # no need to pay the cost of repoview.changelog
1931 # no need to pay the cost of repoview.changelog
1917 unfi = self.unfiltered()
1932 unfi = self.unfiltered()
1918 return len(unfi.changelog)
1933 return len(unfi.changelog)
1919
1934
1920 def __iter__(self):
1935 def __iter__(self):
1921 return iter(self.changelog)
1936 return iter(self.changelog)
1922
1937
1923 def revs(self, expr, *args):
1938 def revs(self, expr, *args):
1924 """Find revisions matching a revset.
1939 """Find revisions matching a revset.
1925
1940
1926 The revset is specified as a string ``expr`` that may contain
1941 The revset is specified as a string ``expr`` that may contain
1927 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1942 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1928
1943
1929 Revset aliases from the configuration are not expanded. To expand
1944 Revset aliases from the configuration are not expanded. To expand
1930 user aliases, consider calling ``scmutil.revrange()`` or
1945 user aliases, consider calling ``scmutil.revrange()`` or
1931 ``repo.anyrevs([expr], user=True)``.
1946 ``repo.anyrevs([expr], user=True)``.
1932
1947
1933 Returns a smartset.abstractsmartset, which is a list-like interface
1948 Returns a smartset.abstractsmartset, which is a list-like interface
1934 that contains integer revisions.
1949 that contains integer revisions.
1935 """
1950 """
1936 tree = revsetlang.spectree(expr, *args)
1951 tree = revsetlang.spectree(expr, *args)
1937 return revset.makematcher(tree)(self)
1952 return revset.makematcher(tree)(self)
1938
1953
def set(self, expr, *args):
    """Find revisions matching a revset and emit changectx instances.

    This is a convenience wrapper around ``revs()`` that iterates the
    result and is a generator of changectx instances.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()``.
    """
    for rev in self.revs(expr, *args):
        yield self[rev]
1950
1965
def anyrevs(self, specs, user=False, localalias=None):
    """Find revisions matching one of the given revsets.

    Revset aliases from the configuration are not expanded by default. To
    expand user aliases, specify ``user=True``. To provide some local
    definitions overriding user aliases, set ``localalias`` to
    ``{name: definitionstring}``.
    """
    # Fast paths for the two most common single-spec queries.
    if specs == [b'null']:
        return revset.baseset([nullrev])
    if specs == [b'.']:
        cached = self._quick_access_changeid.get(b'.')
        if cached is not None:
            return revset.baseset([cached[0]])
    # Full evaluation; only pass a ui (for alias expansion) when the
    # caller explicitly asked for user aliases.
    if not user:
        matcher = revset.matchany(None, specs, localalias=localalias)
    else:
        matcher = revset.matchany(
            self.ui,
            specs,
            lookup=revset.lookupfn(self),
            localalias=localalias,
        )
    return matcher(self)
1975
1990
def url(self):
    """Return the repository location as a ``file:`` URL (bytes)."""
    return b'file:' + self.root
1978
1993
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This a convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.
    """
    # Delegate straight to the hook machinery with this repo as context.
    return hook.hook(self.ui, self, name, throw, **args)
1987
2002
@filteredpropertycache
def _tagscache(self):
    """Returns a tagscache object that contains various tags related
    caches."""

    # A single decorated accessor keeps cache management simple: the
    # other tag-related methods just read fields off this object.
    class tagscache(object):
        def __init__(self):
            # tags / tagtypes define the tag set for this repository:
            # tags maps tag name to node; tagtypes maps tag name to
            # 'global' or 'local'. (Global tags come from .hgtags across
            # all heads; local tags live in .hg/localtags.)
            self.tags = self.tagtypes = None
            # Derived caches, filled lazily by tagslist()/nodetags().
            self.nodetagscache = self.tagslist = None

    holder = tagscache()
    holder.tags, holder.tagtypes = self._findtags()
    return holder
2010
2025
def tags(self):
    '''return a mapping of tag to node'''
    result = {}
    # When the changelog is filtered we cannot trust the cached tags:
    # recompute them instead of using self._tagscache.
    if self.changelog.filteredrevs:
        tagmap, _types = self._findtags()
    else:
        tagmap = self._tagscache.tags
    rev = self.changelog.rev
    for name, node in pycompat.iteritems(tagmap):
        try:
            # skip tags pointing at unknown nodes
            rev(node)
            result[name] = node
        except (error.LookupError, ValueError):
            pass
    return result
2027
2042
def _findtags(self):
    """Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object."""

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    # alltags maps tag name to (node, hist)
    alltags = tagsmod.findglobaltags(self.ui, self)
    # every global tag starts with type 'global'; local tags may
    # override entries below
    tagtypes = {tag: b'global' for tag in alltags}
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Tag names must be re-encoded because the
    # tags module always uses UTF-8 (so the cache never loses info),
    # while the rest of Mercurial expects the local encoding.
    localized = {}
    for (name, (node, hist)) in pycompat.iteritems(alltags):
        if node != self.nullid:
            localized[encoding.tolocal(name)] = node
    localized[b'tip'] = self.changelog.tip()
    localizedtypes = {
        encoding.tolocal(name): value
        for (name, value) in pycompat.iteritems(tagtypes)
    }
    return (localized, localizedtypes)
2063
2078
def tagtype(self, tagname):
    """
    return the type of the given tag. result can be:

    'local' : a local tag
    'global' : a global tag
    None : tag does not exist
    """
    return self._tagscache.tagtypes.get(tagname)
2074
2089
def tagslist(self):
    '''return a list of tags ordered by revision'''
    if not self._tagscache.tagslist:
        # Decorate each tag with its revision, sort, then drop the rev.
        decorated = []
        for name, node in pycompat.iteritems(self.tags()):
            decorated.append((self.changelog.rev(node), name, node))
        self._tagscache.tagslist = [
            (name, node) for _rev, name, node in sorted(decorated)
        ]
    return self._tagscache.tagslist
2084
2099
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self._tagscache.nodetagscache:
        # Invert the tag->node map into node->[tags] once, then cache.
        inverted = {}
        for name, n in pycompat.iteritems(self._tagscache.tags):
            inverted.setdefault(n, []).append(name)
        for names in pycompat.itervalues(inverted):
            names.sort()
        self._tagscache.nodetagscache = inverted
    return self._tagscache.nodetagscache.get(node, [])
2095
2110
def nodebookmarks(self, node):
    """return the list of bookmarks pointing to the specified node"""
    return self._bookmarks.names(node)
2099
2114
def branchmap(self):
    """returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number"""
    return self._branchcaches[self]
2104
2119
@unfilteredmethod
def revbranchcache(self):
    """Return the (lazily created) rev->branch cache for this repo."""
    # Created on first use and bound to the unfiltered repo so all
    # repoview filters share a single cache instance.
    if not self._revbranchcache:
        self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
    return self._revbranchcache
2110
2125
def register_changeset(self, rev, changelogrevision):
    """Record branch data for ``rev`` in the rev-branch cache."""
    self.revbranchcache().setdata(rev, changelogrevision)
2113
2128
def branchtip(self, branch, ignoremissing=False):
    """return the tip node for a given branch

    If ignoremissing is True, then this method will not raise an error.
    This is helpful for callers that only expect None for a missing branch
    (e.g. namespace).

    """
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if ignoremissing:
            return None
        raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2129
2144
def lookup(self, key):
    """Resolve ``key`` (a revision symbol) to a node, raising on failure."""
    node = scmutil.revsymbol(self, key).node()
    if node is None:
        raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
    return node
2135
2150
def lookupbranch(self, key):
    """Return the branch named ``key`` if it exists, else the branch of
    the revision ``key`` resolves to."""
    if self.branchmap().hasbranch(key):
        return key
    return scmutil.revsymbol(self, key).branch()
2141
2156
def known(self, nodes):
    """Return a list of booleans, one per node, telling whether each
    node is known (present and not filtered) in this repository."""
    cl = self.changelog
    get_rev = cl.index.get_rev
    filtered = cl.filteredrevs
    # A node is known iff it resolves to a revision that is not filtered.
    revs = (get_rev(n) for n in nodes)
    return [r is not None and r not in filtered for r in revs]
2152
2167
def local(self):
    """Return this repo object itself (it is a local repository)."""
    return self
2155
2170
def publishing(self):
    """Return True if this repository is a publishing repo."""
    # It's safe (and desirable) to trust the publish flag unconditionally
    # so that we don't finalize changes shared between users via ssh or nfs.
    return self.ui.configbool(b'phases', b'publish', untrusted=True)
2160
2175
def cancopy(self):
    """Return True if this repo can be copied by simple file copy."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    # Non-publishing repos are always safe to copy.
    if not self.publishing():
        return True
    # If publishing, we can't copy while filtered content exists.
    return not self.filtered(b'visible').changelog.filteredrevs
2169
2184
def shared(self):
    '''the type of shared repository (None if not shared)'''
    # The store is shared iff the shared path diverges from our own path.
    if self.sharedpath == self.path:
        return None
    return b'store'
2175
2190
def wjoin(self, f, *insidef):
    """Join ``f`` (and any extra components) onto the working-dir root."""
    return self.vfs.reljoin(self.root, f, *insidef)
2178
2193
def setparents(self, p1, p2=None):
    """Set the working directory parents and drop stale quick-access
    changeid cache entries."""
    if p2 is None:
        p2 = self.nullid
    # Delegate to the workingctx, then invalidate the '.' fast path.
    self[None].setparents(p1, p2)
    self._quick_access_changeid_invalidate()
2184
2199
def filectx(self, path, changeid=None, fileid=None, changectx=None):
    """changeid must be a changeset revision, if specified.
    fileid can be a file revision or node."""
    return context.filectx(
        self, path, changeid, fileid, changectx=changectx
    )
2191
2206
def getcwd(self):
    """Return the current working directory as seen by the dirstate."""
    return self.dirstate.getcwd()
2194
2209
def pathto(self, f, cwd=None):
    """Return ``f`` expressed relative to ``cwd`` (dirstate semantics)."""
    return self.dirstate.pathto(f, cwd)
2197
2212
def _loadfilter(self, filter):
    """Compile and cache the (matcher, fn, params) triples configured
    under the ``filter`` config section (e.g. 'encode' / 'decode')."""
    if filter not in self._filterpats:
        compiled = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == b'!':
                continue
            matcher = matchmod.match(self.root, b'', [pat])
            applyfn = None
            params = cmd
            # Prefer a registered data filter whose name prefixes cmd.
            for name, datafn in pycompat.iteritems(self._datafilters):
                if cmd.startswith(name):
                    applyfn = datafn
                    params = cmd[len(name) :].lstrip()
                    break
            if not applyfn:
                # Fall back to piping the data through a shell command.
                applyfn = lambda s, c, **kwargs: procutil.filter(s, c)
                applyfn.__name__ = 'commandfilter'
            # Wrap old filters not supporting keyword arguments.
            if not pycompat.getargspec(applyfn)[2]:
                wrapped = applyfn
                applyfn = lambda s, c, oldfn=wrapped, **kwargs: oldfn(s, c)
                applyfn.__name__ = 'compat-' + wrapped.__name__
            compiled.append((matcher, applyfn, params))
        self._filterpats[filter] = compiled
    return self._filterpats[filter]
2223
2238
2224 def _filter(self, filterpats, filename, data):
2239 def _filter(self, filterpats, filename, data):
2225 for mf, fn, cmd in filterpats:
2240 for mf, fn, cmd in filterpats:
2226 if mf(filename):
2241 if mf(filename):
2227 self.ui.debug(
2242 self.ui.debug(
2228 b"filtering %s through %s\n"
2243 b"filtering %s through %s\n"
2229 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2244 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2230 )
2245 )
2231 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2246 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2232 break
2247 break
2233
2248
2234 return data
2249 return data
2235
2250
@unfilteredpropertycache
def _encodefilterpats(self):
    """Cached [encode] filter patterns (working dir -> store)."""
    return self._loadfilter(b'encode')
2239
2254
@unfilteredpropertycache
def _decodefilterpats(self):
    """Cached [decode] filter patterns (store -> working dir)."""
    return self._loadfilter(b'decode')
2243
2258
def adddatafilter(self, name, filter):
    """Register ``filter`` as a named data filter for _loadfilter()."""
    self._datafilters[name] = filter
2246
2261
def wread(self, filename):
    """Read ``filename`` from the working directory and apply the
    [encode] filters. Symlinks yield their target instead of content."""
    if self.wvfs.islink(filename):
        raw = self.wvfs.readlink(filename)
    else:
        raw = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, raw)
2253
2268
def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
    """write ``data`` into ``filename`` in the working directory

    This returns length of written (maybe decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if b'l' in flags:
        # symlink: the data is the link target
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(
            filename, data, backgroundclose=backgroundclose, **kwargs
        )
        # second flag toggles the executable bit
        self.wvfs.setflags(filename, False, b'x' in flags)
    return len(data)
2271
2286
def wwritedata(self, filename, data):
    """Apply the [decode] filters to ``data`` without writing anything."""
    return self._filter(self._decodefilterpats, filename, data)
2274
2289
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    # _transref is a weakref; it may be unset or already collected.
    tr = self._transref() if self._transref else None
    if tr and tr.running():
        return tr
    return None
2285
2300
2286 def transaction(self, desc, report=None):
2301 def transaction(self, desc, report=None):
2287 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2302 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2288 b'devel', b'check-locks'
2303 b'devel', b'check-locks'
2289 ):
2304 ):
2290 if self._currentlock(self._lockref) is None:
2305 if self._currentlock(self._lockref) is None:
2291 raise error.ProgrammingError(b'transaction requires locking')
2306 raise error.ProgrammingError(b'transaction requires locking')
2292 tr = self.currenttransaction()
2307 tr = self.currenttransaction()
2293 if tr is not None:
2308 if tr is not None:
2294 return tr.nest(name=desc)
2309 return tr.nest(name=desc)
2295
2310
2296 # abort here if the journal already exists
2311 # abort here if the journal already exists
2297 if self.svfs.exists(b"journal"):
2312 if self.svfs.exists(b"journal"):
2298 raise error.RepoError(
2313 raise error.RepoError(
2299 _(b"abandoned transaction found"),
2314 _(b"abandoned transaction found"),
2300 hint=_(b"run 'hg recover' to clean up transaction"),
2315 hint=_(b"run 'hg recover' to clean up transaction"),
2301 )
2316 )
2302
2317
2303 idbase = b"%.40f#%f" % (random.random(), time.time())
2318 idbase = b"%.40f#%f" % (random.random(), time.time())
2304 ha = hex(hashutil.sha1(idbase).digest())
2319 ha = hex(hashutil.sha1(idbase).digest())
2305 txnid = b'TXN:' + ha
2320 txnid = b'TXN:' + ha
2306 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2321 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2307
2322
2308 self._writejournal(desc)
2323 self._writejournal(desc)
2309 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2324 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2310 if report:
2325 if report:
2311 rp = report
2326 rp = report
2312 else:
2327 else:
2313 rp = self.ui.warn
2328 rp = self.ui.warn
2314 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2329 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2315 # we must avoid cyclic reference between repo and transaction.
2330 # we must avoid cyclic reference between repo and transaction.
2316 reporef = weakref.ref(self)
2331 reporef = weakref.ref(self)
2317 # Code to track tag movement
2332 # Code to track tag movement
2318 #
2333 #
2319 # Since tags are all handled as file content, it is actually quite hard
2334 # Since tags are all handled as file content, it is actually quite hard
2320 # to track these movement from a code perspective. So we fallback to a
2335 # to track these movement from a code perspective. So we fallback to a
2321 # tracking at the repository level. One could envision to track changes
2336 # tracking at the repository level. One could envision to track changes
2322 # to the '.hgtags' file through changegroup apply but that fails to
2337 # to the '.hgtags' file through changegroup apply but that fails to
2323 # cope with case where transaction expose new heads without changegroup
2338 # cope with case where transaction expose new heads without changegroup
2324 # being involved (eg: phase movement).
2339 # being involved (eg: phase movement).
2325 #
2340 #
2326 # For now, We gate the feature behind a flag since this likely comes
2341 # For now, We gate the feature behind a flag since this likely comes
2327 # with performance impacts. The current code run more often than needed
2342 # with performance impacts. The current code run more often than needed
2328 # and do not use caches as much as it could. The current focus is on
2343 # and do not use caches as much as it could. The current focus is on
2329 # the behavior of the feature so we disable it by default. The flag
2344 # the behavior of the feature so we disable it by default. The flag
2330 # will be removed when we are happy with the performance impact.
2345 # will be removed when we are happy with the performance impact.
2331 #
2346 #
2332 # Once this feature is no longer experimental move the following
2347 # Once this feature is no longer experimental move the following
2333 # documentation to the appropriate help section:
2348 # documentation to the appropriate help section:
2334 #
2349 #
2335 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2350 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2336 # tags (new or changed or deleted tags). In addition the details of
2351 # tags (new or changed or deleted tags). In addition the details of
2337 # these changes are made available in a file at:
2352 # these changes are made available in a file at:
2338 # ``REPOROOT/.hg/changes/tags.changes``.
2353 # ``REPOROOT/.hg/changes/tags.changes``.
2339 # Make sure you check for HG_TAG_MOVED before reading that file as it
2354 # Make sure you check for HG_TAG_MOVED before reading that file as it
2340 # might exist from a previous transaction even if no tag were touched
2355 # might exist from a previous transaction even if no tag were touched
2341 # in this one. Changes are recorded in a line base format::
2356 # in this one. Changes are recorded in a line base format::
2342 #
2357 #
2343 # <action> <hex-node> <tag-name>\n
2358 # <action> <hex-node> <tag-name>\n
2344 #
2359 #
2345 # Actions are defined as follow:
2360 # Actions are defined as follow:
2346 # "-R": tag is removed,
2361 # "-R": tag is removed,
2347 # "+A": tag is added,
2362 # "+A": tag is added,
2348 # "-M": tag is moved (old value),
2363 # "-M": tag is moved (old value),
2349 # "+M": tag is moved (new value),
2364 # "+M": tag is moved (new value),
2350 tracktags = lambda x: None
2365 tracktags = lambda x: None
2351 # experimental config: experimental.hook-track-tags
2366 # experimental config: experimental.hook-track-tags
2352 shouldtracktags = self.ui.configbool(
2367 shouldtracktags = self.ui.configbool(
2353 b'experimental', b'hook-track-tags'
2368 b'experimental', b'hook-track-tags'
2354 )
2369 )
2355 if desc != b'strip' and shouldtracktags:
2370 if desc != b'strip' and shouldtracktags:
2356 oldheads = self.changelog.headrevs()
2371 oldheads = self.changelog.headrevs()
2357
2372
2358 def tracktags(tr2):
2373 def tracktags(tr2):
2359 repo = reporef()
2374 repo = reporef()
2360 assert repo is not None # help pytype
2375 assert repo is not None # help pytype
2361 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2376 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2362 newheads = repo.changelog.headrevs()
2377 newheads = repo.changelog.headrevs()
2363 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2378 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2364 # notes: we compare lists here.
2379 # notes: we compare lists here.
2365 # As we do it only once buiding set would not be cheaper
2380 # As we do it only once buiding set would not be cheaper
2366 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2381 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2367 if changes:
2382 if changes:
2368 tr2.hookargs[b'tag_moved'] = b'1'
2383 tr2.hookargs[b'tag_moved'] = b'1'
2369 with repo.vfs(
2384 with repo.vfs(
2370 b'changes/tags.changes', b'w', atomictemp=True
2385 b'changes/tags.changes', b'w', atomictemp=True
2371 ) as changesfile:
2386 ) as changesfile:
2372 # note: we do not register the file to the transaction
2387 # note: we do not register the file to the transaction
2373 # because we needs it to still exist on the transaction
2388 # because we needs it to still exist on the transaction
2374 # is close (for txnclose hooks)
2389 # is close (for txnclose hooks)
2375 tagsmod.writediff(changesfile, changes)
2390 tagsmod.writediff(changesfile, changes)
2376
2391
2377 def validate(tr2):
2392 def validate(tr2):
2378 """will run pre-closing hooks"""
2393 """will run pre-closing hooks"""
2379 # XXX the transaction API is a bit lacking here so we take a hacky
2394 # XXX the transaction API is a bit lacking here so we take a hacky
2380 # path for now
2395 # path for now
2381 #
2396 #
2382 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2397 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2383 # dict is copied before these run. In addition we needs the data
2398 # dict is copied before these run. In addition we needs the data
2384 # available to in memory hooks too.
2399 # available to in memory hooks too.
2385 #
2400 #
2386 # Moreover, we also need to make sure this runs before txnclose
2401 # Moreover, we also need to make sure this runs before txnclose
2387 # hooks and there is no "pending" mechanism that would execute
2402 # hooks and there is no "pending" mechanism that would execute
2388 # logic only if hooks are about to run.
2403 # logic only if hooks are about to run.
2389 #
2404 #
2390 # Fixing this limitation of the transaction is also needed to track
2405 # Fixing this limitation of the transaction is also needed to track
2391 # other families of changes (bookmarks, phases, obsolescence).
2406 # other families of changes (bookmarks, phases, obsolescence).
2392 #
2407 #
2393 # This will have to be fixed before we remove the experimental
2408 # This will have to be fixed before we remove the experimental
2394 # gating.
2409 # gating.
2395 tracktags(tr2)
2410 tracktags(tr2)
2396 repo = reporef()
2411 repo = reporef()
2397 assert repo is not None # help pytype
2412 assert repo is not None # help pytype
2398
2413
2399 singleheadopt = (b'experimental', b'single-head-per-branch')
2414 singleheadopt = (b'experimental', b'single-head-per-branch')
2400 singlehead = repo.ui.configbool(*singleheadopt)
2415 singlehead = repo.ui.configbool(*singleheadopt)
2401 if singlehead:
2416 if singlehead:
2402 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2417 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2403 accountclosed = singleheadsub.get(
2418 accountclosed = singleheadsub.get(
2404 b"account-closed-heads", False
2419 b"account-closed-heads", False
2405 )
2420 )
2406 if singleheadsub.get(b"public-changes-only", False):
2421 if singleheadsub.get(b"public-changes-only", False):
2407 filtername = b"immutable"
2422 filtername = b"immutable"
2408 else:
2423 else:
2409 filtername = b"visible"
2424 filtername = b"visible"
2410 scmutil.enforcesinglehead(
2425 scmutil.enforcesinglehead(
2411 repo, tr2, desc, accountclosed, filtername
2426 repo, tr2, desc, accountclosed, filtername
2412 )
2427 )
2413 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2428 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2414 for name, (old, new) in sorted(
2429 for name, (old, new) in sorted(
2415 tr.changes[b'bookmarks'].items()
2430 tr.changes[b'bookmarks'].items()
2416 ):
2431 ):
2417 args = tr.hookargs.copy()
2432 args = tr.hookargs.copy()
2418 args.update(bookmarks.preparehookargs(name, old, new))
2433 args.update(bookmarks.preparehookargs(name, old, new))
2419 repo.hook(
2434 repo.hook(
2420 b'pretxnclose-bookmark',
2435 b'pretxnclose-bookmark',
2421 throw=True,
2436 throw=True,
2422 **pycompat.strkwargs(args)
2437 **pycompat.strkwargs(args)
2423 )
2438 )
2424 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2439 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2425 cl = repo.unfiltered().changelog
2440 cl = repo.unfiltered().changelog
2426 for revs, (old, new) in tr.changes[b'phases']:
2441 for revs, (old, new) in tr.changes[b'phases']:
2427 for rev in revs:
2442 for rev in revs:
2428 args = tr.hookargs.copy()
2443 args = tr.hookargs.copy()
2429 node = hex(cl.node(rev))
2444 node = hex(cl.node(rev))
2430 args.update(phases.preparehookargs(node, old, new))
2445 args.update(phases.preparehookargs(node, old, new))
2431 repo.hook(
2446 repo.hook(
2432 b'pretxnclose-phase',
2447 b'pretxnclose-phase',
2433 throw=True,
2448 throw=True,
2434 **pycompat.strkwargs(args)
2449 **pycompat.strkwargs(args)
2435 )
2450 )
2436
2451
2437 repo.hook(
2452 repo.hook(
2438 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2453 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2439 )
2454 )
2440
2455
2441 def releasefn(tr, success):
2456 def releasefn(tr, success):
2442 repo = reporef()
2457 repo = reporef()
2443 if repo is None:
2458 if repo is None:
2444 # If the repo has been GC'd (and this release function is being
2459 # If the repo has been GC'd (and this release function is being
2445 # called from transaction.__del__), there's not much we can do,
2460 # called from transaction.__del__), there's not much we can do,
2446 # so just leave the unfinished transaction there and let the
2461 # so just leave the unfinished transaction there and let the
2447 # user run `hg recover`.
2462 # user run `hg recover`.
2448 return
2463 return
2449 if success:
2464 if success:
2450 # this should be explicitly invoked here, because
2465 # this should be explicitly invoked here, because
2451 # in-memory changes aren't written out at closing
2466 # in-memory changes aren't written out at closing
2452 # transaction, if tr.addfilegenerator (via
2467 # transaction, if tr.addfilegenerator (via
2453 # dirstate.write or so) isn't invoked while
2468 # dirstate.write or so) isn't invoked while
2454 # transaction running
2469 # transaction running
2455 repo.dirstate.write(None)
2470 repo.dirstate.write(None)
2456 else:
2471 else:
2457 # discard all changes (including ones already written
2472 # discard all changes (including ones already written
2458 # out) in this transaction
2473 # out) in this transaction
2459 narrowspec.restorebackup(self, b'journal.narrowspec')
2474 narrowspec.restorebackup(self, b'journal.narrowspec')
2460 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2475 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2461 repo.dirstate.restorebackup(None, b'journal.dirstate')
2476 repo.dirstate.restorebackup(None, b'journal.dirstate')
2462
2477
2463 repo.invalidate(clearfilecache=True)
2478 repo.invalidate(clearfilecache=True)
2464
2479
2465 tr = transaction.transaction(
2480 tr = transaction.transaction(
2466 rp,
2481 rp,
2467 self.svfs,
2482 self.svfs,
2468 vfsmap,
2483 vfsmap,
2469 b"journal",
2484 b"journal",
2470 b"undo",
2485 b"undo",
2471 aftertrans(renames),
2486 aftertrans(renames),
2472 self.store.createmode,
2487 self.store.createmode,
2473 validator=validate,
2488 validator=validate,
2474 releasefn=releasefn,
2489 releasefn=releasefn,
2475 checkambigfiles=_cachedfiles,
2490 checkambigfiles=_cachedfiles,
2476 name=desc,
2491 name=desc,
2477 )
2492 )
2478 tr.changes[b'origrepolen'] = len(self)
2493 tr.changes[b'origrepolen'] = len(self)
2479 tr.changes[b'obsmarkers'] = set()
2494 tr.changes[b'obsmarkers'] = set()
2480 tr.changes[b'phases'] = []
2495 tr.changes[b'phases'] = []
2481 tr.changes[b'bookmarks'] = {}
2496 tr.changes[b'bookmarks'] = {}
2482
2497
2483 tr.hookargs[b'txnid'] = txnid
2498 tr.hookargs[b'txnid'] = txnid
2484 tr.hookargs[b'txnname'] = desc
2499 tr.hookargs[b'txnname'] = desc
2485 tr.hookargs[b'changes'] = tr.changes
2500 tr.hookargs[b'changes'] = tr.changes
2486 # note: writing the fncache only during finalize mean that the file is
2501 # note: writing the fncache only during finalize mean that the file is
2487 # outdated when running hooks. As fncache is used for streaming clone,
2502 # outdated when running hooks. As fncache is used for streaming clone,
2488 # this is not expected to break anything that happen during the hooks.
2503 # this is not expected to break anything that happen during the hooks.
2489 tr.addfinalize(b'flush-fncache', self.store.write)
2504 tr.addfinalize(b'flush-fncache', self.store.write)
2490
2505
2491 def txnclosehook(tr2):
2506 def txnclosehook(tr2):
2492 """To be run if transaction is successful, will schedule a hook run"""
2507 """To be run if transaction is successful, will schedule a hook run"""
2493 # Don't reference tr2 in hook() so we don't hold a reference.
2508 # Don't reference tr2 in hook() so we don't hold a reference.
2494 # This reduces memory consumption when there are multiple
2509 # This reduces memory consumption when there are multiple
2495 # transactions per lock. This can likely go away if issue5045
2510 # transactions per lock. This can likely go away if issue5045
2496 # fixes the function accumulation.
2511 # fixes the function accumulation.
2497 hookargs = tr2.hookargs
2512 hookargs = tr2.hookargs
2498
2513
2499 def hookfunc(unused_success):
2514 def hookfunc(unused_success):
2500 repo = reporef()
2515 repo = reporef()
2501 assert repo is not None # help pytype
2516 assert repo is not None # help pytype
2502
2517
2503 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2518 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2504 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2519 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2505 for name, (old, new) in bmchanges:
2520 for name, (old, new) in bmchanges:
2506 args = tr.hookargs.copy()
2521 args = tr.hookargs.copy()
2507 args.update(bookmarks.preparehookargs(name, old, new))
2522 args.update(bookmarks.preparehookargs(name, old, new))
2508 repo.hook(
2523 repo.hook(
2509 b'txnclose-bookmark',
2524 b'txnclose-bookmark',
2510 throw=False,
2525 throw=False,
2511 **pycompat.strkwargs(args)
2526 **pycompat.strkwargs(args)
2512 )
2527 )
2513
2528
2514 if hook.hashook(repo.ui, b'txnclose-phase'):
2529 if hook.hashook(repo.ui, b'txnclose-phase'):
2515 cl = repo.unfiltered().changelog
2530 cl = repo.unfiltered().changelog
2516 phasemv = sorted(
2531 phasemv = sorted(
2517 tr.changes[b'phases'], key=lambda r: r[0][0]
2532 tr.changes[b'phases'], key=lambda r: r[0][0]
2518 )
2533 )
2519 for revs, (old, new) in phasemv:
2534 for revs, (old, new) in phasemv:
2520 for rev in revs:
2535 for rev in revs:
2521 args = tr.hookargs.copy()
2536 args = tr.hookargs.copy()
2522 node = hex(cl.node(rev))
2537 node = hex(cl.node(rev))
2523 args.update(phases.preparehookargs(node, old, new))
2538 args.update(phases.preparehookargs(node, old, new))
2524 repo.hook(
2539 repo.hook(
2525 b'txnclose-phase',
2540 b'txnclose-phase',
2526 throw=False,
2541 throw=False,
2527 **pycompat.strkwargs(args)
2542 **pycompat.strkwargs(args)
2528 )
2543 )
2529
2544
2530 repo.hook(
2545 repo.hook(
2531 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2546 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2532 )
2547 )
2533
2548
2534 repo = reporef()
2549 repo = reporef()
2535 assert repo is not None # help pytype
2550 assert repo is not None # help pytype
2536 repo._afterlock(hookfunc)
2551 repo._afterlock(hookfunc)
2537
2552
2538 tr.addfinalize(b'txnclose-hook', txnclosehook)
2553 tr.addfinalize(b'txnclose-hook', txnclosehook)
2539 # Include a leading "-" to make it happen before the transaction summary
2554 # Include a leading "-" to make it happen before the transaction summary
2540 # reports registered via scmutil.registersummarycallback() whose names
2555 # reports registered via scmutil.registersummarycallback() whose names
2541 # are 00-txnreport etc. That way, the caches will be warm when the
2556 # are 00-txnreport etc. That way, the caches will be warm when the
2542 # callbacks run.
2557 # callbacks run.
2543 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2558 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2544
2559
2545 def txnaborthook(tr2):
2560 def txnaborthook(tr2):
2546 """To be run if transaction is aborted"""
2561 """To be run if transaction is aborted"""
2547 repo = reporef()
2562 repo = reporef()
2548 assert repo is not None # help pytype
2563 assert repo is not None # help pytype
2549 repo.hook(
2564 repo.hook(
2550 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2565 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2551 )
2566 )
2552
2567
2553 tr.addabort(b'txnabort-hook', txnaborthook)
2568 tr.addabort(b'txnabort-hook', txnaborthook)
2554 # avoid eager cache invalidation. in-memory data should be identical
2569 # avoid eager cache invalidation. in-memory data should be identical
2555 # to stored data if transaction has no error.
2570 # to stored data if transaction has no error.
2556 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2571 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2557 self._transref = weakref.ref(tr)
2572 self._transref = weakref.ref(tr)
2558 scmutil.registersummarycallback(self, tr, desc)
2573 scmutil.registersummarycallback(self, tr, desc)
2559 return tr
2574 return tr
2560
2575
2561 def _journalfiles(self):
2576 def _journalfiles(self):
2562 return (
2577 return (
2563 (self.svfs, b'journal'),
2578 (self.svfs, b'journal'),
2564 (self.svfs, b'journal.narrowspec'),
2579 (self.svfs, b'journal.narrowspec'),
2565 (self.vfs, b'journal.narrowspec.dirstate'),
2580 (self.vfs, b'journal.narrowspec.dirstate'),
2566 (self.vfs, b'journal.dirstate'),
2581 (self.vfs, b'journal.dirstate'),
2567 (self.vfs, b'journal.branch'),
2582 (self.vfs, b'journal.branch'),
2568 (self.vfs, b'journal.desc'),
2583 (self.vfs, b'journal.desc'),
2569 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2584 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2570 (self.svfs, b'journal.phaseroots'),
2585 (self.svfs, b'journal.phaseroots'),
2571 )
2586 )
2572
2587
2573 def undofiles(self):
2588 def undofiles(self):
2574 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2589 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2575
2590
    @unfilteredmethod
    def _writejournal(self, desc):
        """Back up the non-store state a transaction may touch.

        Writes the ``journal.*`` files listed by ``_journalfiles`` so a later
        rollback/recover can restore dirstate, narrowspec, branch, bookmarks
        and phases. ``desc`` is a human-readable description recorded in
        ``journal.desc`` together with the current changelog length.
        """
        # dirstate and narrowspec have their own backup helpers
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        # first line: repo length before the transaction; second line: desc
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        # bookmarks may live on a different vfs (e.g. with bookmarks in store)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2590
2605
2591 def recover(self):
2606 def recover(self):
2592 with self.lock():
2607 with self.lock():
2593 if self.svfs.exists(b"journal"):
2608 if self.svfs.exists(b"journal"):
2594 self.ui.status(_(b"rolling back interrupted transaction\n"))
2609 self.ui.status(_(b"rolling back interrupted transaction\n"))
2595 vfsmap = {
2610 vfsmap = {
2596 b'': self.svfs,
2611 b'': self.svfs,
2597 b'plain': self.vfs,
2612 b'plain': self.vfs,
2598 }
2613 }
2599 transaction.rollback(
2614 transaction.rollback(
2600 self.svfs,
2615 self.svfs,
2601 vfsmap,
2616 vfsmap,
2602 b"journal",
2617 b"journal",
2603 self.ui.warn,
2618 self.ui.warn,
2604 checkambigfiles=_cachedfiles,
2619 checkambigfiles=_cachedfiles,
2605 )
2620 )
2606 self.invalidate()
2621 self.invalidate()
2607 return True
2622 return True
2608 else:
2623 else:
2609 self.ui.warn(_(b"no interrupted transaction available\n"))
2624 self.ui.warn(_(b"no interrupted transaction available\n"))
2610 return False
2625 return False
2611
2626
2612 def rollback(self, dryrun=False, force=False):
2627 def rollback(self, dryrun=False, force=False):
2613 wlock = lock = dsguard = None
2628 wlock = lock = dsguard = None
2614 try:
2629 try:
2615 wlock = self.wlock()
2630 wlock = self.wlock()
2616 lock = self.lock()
2631 lock = self.lock()
2617 if self.svfs.exists(b"undo"):
2632 if self.svfs.exists(b"undo"):
2618 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2633 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2619
2634
2620 return self._rollback(dryrun, force, dsguard)
2635 return self._rollback(dryrun, force, dsguard)
2621 else:
2636 else:
2622 self.ui.warn(_(b"no rollback information available\n"))
2637 self.ui.warn(_(b"no rollback information available\n"))
2623 return 1
2638 return 1
2624 finally:
2639 finally:
2625 release(dsguard, lock, wlock)
2640 release(dsguard, lock, wlock)
2626
2641
    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of ``rollback``; caller holds wlock and lock.

        ``dsguard`` is a dirstateguard protecting the working copy state; it
        is closed here when the rollback restored a pre-transaction dirstate
        itself (so the guard does not overwrite it on release). Returns 0.
        """
        ui = self.ui
        try:
            # undo.desc format (written by _writejournal): "<oldlen>\n<desc>\n"
            # with an optional third detail line.
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            # no/unreadable undo.desc: proceed with a generic message
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        # rolling back a commit while the working copy is elsewhere could
        # silently drop work; require --force in that case
        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        # record dirstate parents before the store is rewritten
        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        # restore bookmarks and phases from their undo copies, if present
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        # if a dirstate parent no longer exists, the working copy state must
        # be restored from the undo backups as well
        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            # any in-progress merge no longer makes sense after the rollback
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2719
2734
2720 def _buildcacheupdater(self, newtransaction):
2735 def _buildcacheupdater(self, newtransaction):
2721 """called during transaction to build the callback updating cache
2736 """called during transaction to build the callback updating cache
2722
2737
2723 Lives on the repository to help extension who might want to augment
2738 Lives on the repository to help extension who might want to augment
2724 this logic. For this purpose, the created transaction is passed to the
2739 this logic. For this purpose, the created transaction is passed to the
2725 method.
2740 method.
2726 """
2741 """
2727 # we must avoid cyclic reference between repo and transaction.
2742 # we must avoid cyclic reference between repo and transaction.
2728 reporef = weakref.ref(self)
2743 reporef = weakref.ref(self)
2729
2744
2730 def updater(tr):
2745 def updater(tr):
2731 repo = reporef()
2746 repo = reporef()
2732 assert repo is not None # help pytype
2747 assert repo is not None # help pytype
2733 repo.updatecaches(tr)
2748 repo.updatecaches(tr)
2734
2749
2735 return updater
2750 return updater
2736
2751
2737 @unfilteredmethod
2752 @unfilteredmethod
2738 def updatecaches(self, tr=None, full=False, caches=None):
2753 def updatecaches(self, tr=None, full=False, caches=None):
2739 """warm appropriate caches
2754 """warm appropriate caches
2740
2755
2741 If this function is called after a transaction closed. The transaction
2756 If this function is called after a transaction closed. The transaction
2742 will be available in the 'tr' argument. This can be used to selectively
2757 will be available in the 'tr' argument. This can be used to selectively
2743 update caches relevant to the changes in that transaction.
2758 update caches relevant to the changes in that transaction.
2744
2759
2745 If 'full' is set, make sure all caches the function knows about have
2760 If 'full' is set, make sure all caches the function knows about have
2746 up-to-date data. Even the ones usually loaded more lazily.
2761 up-to-date data. Even the ones usually loaded more lazily.
2747
2762
2748 The `full` argument can take a special "post-clone" value. In this case
2763 The `full` argument can take a special "post-clone" value. In this case
2749 the cache warming is made after a clone and of the slower cache might
2764 the cache warming is made after a clone and of the slower cache might
2750 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2765 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2751 as we plan for a cleaner way to deal with this for 5.9.
2766 as we plan for a cleaner way to deal with this for 5.9.
2752 """
2767 """
2753 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2768 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2754 # During strip, many caches are invalid but
2769 # During strip, many caches are invalid but
2755 # later call to `destroyed` will refresh them.
2770 # later call to `destroyed` will refresh them.
2756 return
2771 return
2757
2772
2758 unfi = self.unfiltered()
2773 unfi = self.unfiltered()
2759
2774
2760 if full:
2775 if full:
2761 msg = (
2776 msg = (
2762 "`full` argument for `repo.updatecaches` is deprecated\n"
2777 "`full` argument for `repo.updatecaches` is deprecated\n"
2763 "(use `caches=repository.CACHE_ALL` instead)"
2778 "(use `caches=repository.CACHE_ALL` instead)"
2764 )
2779 )
2765 self.ui.deprecwarn(msg, b"5.9")
2780 self.ui.deprecwarn(msg, b"5.9")
2766 caches = repository.CACHES_ALL
2781 caches = repository.CACHES_ALL
2767 if full == b"post-clone":
2782 if full == b"post-clone":
2768 caches = repository.CACHES_POST_CLONE
2783 caches = repository.CACHES_POST_CLONE
2769 caches = repository.CACHES_ALL
2784 caches = repository.CACHES_ALL
2770 elif caches is None:
2785 elif caches is None:
2771 caches = repository.CACHES_DEFAULT
2786 caches = repository.CACHES_DEFAULT
2772
2787
2773 if repository.CACHE_BRANCHMAP_SERVED in caches:
2788 if repository.CACHE_BRANCHMAP_SERVED in caches:
2774 if tr is None or tr.changes[b'origrepolen'] < len(self):
2789 if tr is None or tr.changes[b'origrepolen'] < len(self):
2775 # accessing the 'served' branchmap should refresh all the others,
2790 # accessing the 'served' branchmap should refresh all the others,
2776 self.ui.debug(b'updating the branch cache\n')
2791 self.ui.debug(b'updating the branch cache\n')
2777 self.filtered(b'served').branchmap()
2792 self.filtered(b'served').branchmap()
2778 self.filtered(b'served.hidden').branchmap()
2793 self.filtered(b'served.hidden').branchmap()
2779
2794
2780 if repository.CACHE_CHANGELOG_CACHE in caches:
2795 if repository.CACHE_CHANGELOG_CACHE in caches:
2781 self.changelog.update_caches(transaction=tr)
2796 self.changelog.update_caches(transaction=tr)
2782
2797
2783 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2798 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2784 self.manifestlog.update_caches(transaction=tr)
2799 self.manifestlog.update_caches(transaction=tr)
2785
2800
2786 if repository.CACHE_REV_BRANCH in caches:
2801 if repository.CACHE_REV_BRANCH in caches:
2787 rbc = unfi.revbranchcache()
2802 rbc = unfi.revbranchcache()
2788 for r in unfi.changelog:
2803 for r in unfi.changelog:
2789 rbc.branchinfo(r)
2804 rbc.branchinfo(r)
2790 rbc.write()
2805 rbc.write()
2791
2806
2792 if repository.CACHE_FULL_MANIFEST in caches:
2807 if repository.CACHE_FULL_MANIFEST in caches:
2793 # ensure the working copy parents are in the manifestfulltextcache
2808 # ensure the working copy parents are in the manifestfulltextcache
2794 for ctx in self[b'.'].parents():
2809 for ctx in self[b'.'].parents():
2795 ctx.manifest() # accessing the manifest is enough
2810 ctx.manifest() # accessing the manifest is enough
2796
2811
2797 if repository.CACHE_FILE_NODE_TAGS in caches:
2812 if repository.CACHE_FILE_NODE_TAGS in caches:
2798 # accessing fnode cache warms the cache
2813 # accessing fnode cache warms the cache
2799 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2814 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2800
2815
2801 if repository.CACHE_TAGS_DEFAULT in caches:
2816 if repository.CACHE_TAGS_DEFAULT in caches:
2802 # accessing tags warm the cache
2817 # accessing tags warm the cache
2803 self.tags()
2818 self.tags()
2804 if repository.CACHE_TAGS_SERVED in caches:
2819 if repository.CACHE_TAGS_SERVED in caches:
2805 self.filtered(b'served').tags()
2820 self.filtered(b'served').tags()
2806
2821
2807 if repository.CACHE_BRANCHMAP_ALL in caches:
2822 if repository.CACHE_BRANCHMAP_ALL in caches:
2808 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2823 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2809 # so we're forcing a write to cause these caches to be warmed up
2824 # so we're forcing a write to cause these caches to be warmed up
2810 # even if they haven't explicitly been requested yet (if they've
2825 # even if they haven't explicitly been requested yet (if they've
2811 # never been used by hg, they won't ever have been written, even if
2826 # never been used by hg, they won't ever have been written, even if
2812 # they're a subset of another kind of cache that *has* been used).
2827 # they're a subset of another kind of cache that *has* been used).
2813 for filt in repoview.filtertable.keys():
2828 for filt in repoview.filtertable.keys():
2814 filtered = self.filtered(filt)
2829 filtered = self.filtered(filt)
2815 filtered.branchmap().write(filtered)
2830 filtered.branchmap().write(filtered)
2816
2831
2817 def invalidatecaches(self):
2832 def invalidatecaches(self):
2818
2833
2819 if '_tagscache' in vars(self):
2834 if '_tagscache' in vars(self):
2820 # can't use delattr on proxy
2835 # can't use delattr on proxy
2821 del self.__dict__['_tagscache']
2836 del self.__dict__['_tagscache']
2822
2837
2823 self._branchcaches.clear()
2838 self._branchcaches.clear()
2824 self.invalidatevolatilesets()
2839 self.invalidatevolatilesets()
2825 self._sparsesignaturecache.clear()
2840 self._sparsesignaturecache.clear()
2826
2841
    def invalidatevolatilesets(self):
        """Drop caches keyed on repository filtering/visibility.

        Must be called whenever the set of visible or obsolete revisions may
        have changed, so the filtered views and quick-access changeid lookups
        are recomputed.
        """
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()
2831
2846
    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state)."""
        if hasunfilteredcache(self, 'dirstate'):
            # drop every cached property tracked by the dirstate's filecache
            # so nothing stale survives the re-read
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    # the property was never populated; nothing to drop
                    pass
            # finally drop the cached dirstate object from the (unfiltered)
            # repository itself
            delattr(self.unfiltered(), 'dirstate')
2848
2863
    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).

        When ``clearfilecache`` is true, the filecache entries themselves are
        dropped in addition to the cached attributes.
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                # attribute was never instantiated; nothing cached to drop
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2884
2899
2885 def invalidateall(self):
2900 def invalidateall(self):
2886 """Fully invalidates both store and non-store parts, causing the
2901 """Fully invalidates both store and non-store parts, causing the
2887 subsequent operation to reread any outside changes."""
2902 subsequent operation to reread any outside changes."""
2888 # extension should hook this to invalidate its caches
2903 # extension should hook this to invalidate its caches
2889 self.invalidate()
2904 self.invalidate()
2890 self.invalidatedirstate()
2905 self.invalidatedirstate()
2891
2906
2892 @unfilteredmethod
2907 @unfilteredmethod
2893 def _refreshfilecachestats(self, tr):
2908 def _refreshfilecachestats(self, tr):
2894 """Reload stats of cached files so that they are flagged as valid"""
2909 """Reload stats of cached files so that they are flagged as valid"""
2895 for k, ce in self._filecache.items():
2910 for k, ce in self._filecache.items():
2896 k = pycompat.sysstr(k)
2911 k = pycompat.sysstr(k)
2897 if k == 'dirstate' or k not in self.__dict__:
2912 if k == 'dirstate' or k not in self.__dict__:
2898 continue
2913 continue
2899 ce.refresh()
2914 ce.refresh()
2900
2915
2901 def _lock(
2916 def _lock(
2902 self,
2917 self,
2903 vfs,
2918 vfs,
2904 lockname,
2919 lockname,
2905 wait,
2920 wait,
2906 releasefn,
2921 releasefn,
2907 acquirefn,
2922 acquirefn,
2908 desc,
2923 desc,
2909 ):
2924 ):
2910 timeout = 0
2925 timeout = 0
2911 warntimeout = 0
2926 warntimeout = 0
2912 if wait:
2927 if wait:
2913 timeout = self.ui.configint(b"ui", b"timeout")
2928 timeout = self.ui.configint(b"ui", b"timeout")
2914 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2929 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2915 # internal config: ui.signal-safe-lock
2930 # internal config: ui.signal-safe-lock
2916 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2931 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2917
2932
2918 l = lockmod.trylock(
2933 l = lockmod.trylock(
2919 self.ui,
2934 self.ui,
2920 vfs,
2935 vfs,
2921 lockname,
2936 lockname,
2922 timeout,
2937 timeout,
2923 warntimeout,
2938 warntimeout,
2924 releasefn=releasefn,
2939 releasefn=releasefn,
2925 acquirefn=acquirefn,
2940 acquirefn=acquirefn,
2926 desc=desc,
2941 desc=desc,
2927 signalsafe=signalsafe,
2942 signalsafe=signalsafe,
2928 )
2943 )
2929 return l
2944 return l
2930
2945
2931 def _afterlock(self, callback):
2946 def _afterlock(self, callback):
2932 """add a callback to be run when the repository is fully unlocked
2947 """add a callback to be run when the repository is fully unlocked
2933
2948
2934 The callback will be executed when the outermost lock is released
2949 The callback will be executed when the outermost lock is released
2935 (with wlock being higher level than 'lock')."""
2950 (with wlock being higher level than 'lock')."""
2936 for ref in (self._wlockref, self._lockref):
2951 for ref in (self._wlockref, self._lockref):
2937 l = ref and ref()
2952 l = ref and ref()
2938 if l and l.held:
2953 if l and l.held:
2939 l.postrelease.append(callback)
2954 l.postrelease.append(callback)
2940 break
2955 break
2941 else: # no lock have been found.
2956 else: # no lock have been found.
2942 callback(True)
2957 callback(True)
2943
2958
2944 def lock(self, wait=True):
2959 def lock(self, wait=True):
2945 """Lock the repository store (.hg/store) and return a weak reference
2960 """Lock the repository store (.hg/store) and return a weak reference
2946 to the lock. Use this before modifying the store (e.g. committing or
2961 to the lock. Use this before modifying the store (e.g. committing or
2947 stripping). If you are opening a transaction, get a lock as well.)
2962 stripping). If you are opening a transaction, get a lock as well.)
2948
2963
2949 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2964 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2950 'wlock' first to avoid a dead-lock hazard."""
2965 'wlock' first to avoid a dead-lock hazard."""
2951 l = self._currentlock(self._lockref)
2966 l = self._currentlock(self._lockref)
2952 if l is not None:
2967 if l is not None:
2953 l.lock()
2968 l.lock()
2954 return l
2969 return l
2955
2970
2956 l = self._lock(
2971 l = self._lock(
2957 vfs=self.svfs,
2972 vfs=self.svfs,
2958 lockname=b"lock",
2973 lockname=b"lock",
2959 wait=wait,
2974 wait=wait,
2960 releasefn=None,
2975 releasefn=None,
2961 acquirefn=self.invalidate,
2976 acquirefn=self.invalidate,
2962 desc=_(b'repository %s') % self.origroot,
2977 desc=_(b'repository %s') % self.origroot,
2963 )
2978 )
2964 self._lockref = weakref.ref(l)
2979 self._lockref = weakref.ref(l)
2965 return l
2980 return l
2966
2981
2967 def wlock(self, wait=True):
2982 def wlock(self, wait=True):
2968 """Lock the non-store parts of the repository (everything under
2983 """Lock the non-store parts of the repository (everything under
2969 .hg except .hg/store) and return a weak reference to the lock.
2984 .hg except .hg/store) and return a weak reference to the lock.
2970
2985
2971 Use this before modifying files in .hg.
2986 Use this before modifying files in .hg.
2972
2987
2973 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2988 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2974 'wlock' first to avoid a dead-lock hazard."""
2989 'wlock' first to avoid a dead-lock hazard."""
2975 l = self._wlockref() if self._wlockref else None
2990 l = self._wlockref() if self._wlockref else None
2976 if l is not None and l.held:
2991 if l is not None and l.held:
2977 l.lock()
2992 l.lock()
2978 return l
2993 return l
2979
2994
2980 # We do not need to check for non-waiting lock acquisition. Such
2995 # We do not need to check for non-waiting lock acquisition. Such
2981 # acquisition would not cause dead-lock as they would just fail.
2996 # acquisition would not cause dead-lock as they would just fail.
2982 if wait and (
2997 if wait and (
2983 self.ui.configbool(b'devel', b'all-warnings')
2998 self.ui.configbool(b'devel', b'all-warnings')
2984 or self.ui.configbool(b'devel', b'check-locks')
2999 or self.ui.configbool(b'devel', b'check-locks')
2985 ):
3000 ):
2986 if self._currentlock(self._lockref) is not None:
3001 if self._currentlock(self._lockref) is not None:
2987 self.ui.develwarn(b'"wlock" acquired after "lock"')
3002 self.ui.develwarn(b'"wlock" acquired after "lock"')
2988
3003
2989 def unlock():
3004 def unlock():
2990 if self.dirstate.pendingparentchange():
3005 if self.dirstate.pendingparentchange():
2991 self.dirstate.invalidate()
3006 self.dirstate.invalidate()
2992 else:
3007 else:
2993 self.dirstate.write(None)
3008 self.dirstate.write(None)
2994
3009
2995 self._filecache[b'dirstate'].refresh()
3010 self._filecache[b'dirstate'].refresh()
2996
3011
2997 l = self._lock(
3012 l = self._lock(
2998 self.vfs,
3013 self.vfs,
2999 b"wlock",
3014 b"wlock",
3000 wait,
3015 wait,
3001 unlock,
3016 unlock,
3002 self.invalidatedirstate,
3017 self.invalidatedirstate,
3003 _(b'working directory of %s') % self.origroot,
3018 _(b'working directory of %s') % self.origroot,
3004 )
3019 )
3005 self._wlockref = weakref.ref(l)
3020 self._wlockref = weakref.ref(l)
3006 return l
3021 return l
3007
3022
3008 def _currentlock(self, lockref):
3023 def _currentlock(self, lockref):
3009 """Returns the lock if it's held, or None if it's not."""
3024 """Returns the lock if it's held, or None if it's not."""
3010 if lockref is None:
3025 if lockref is None:
3011 return None
3026 return None
3012 l = lockref()
3027 l = lockref()
3013 if l is None or not l.held:
3028 if l is None or not l.held:
3014 return None
3029 return None
3015 return l
3030 return l
3016
3031
3017 def currentwlock(self):
3032 def currentwlock(self):
3018 """Returns the wlock if it's held, or None if it's not."""
3033 """Returns the wlock if it's held, or None if it's not."""
3019 return self._currentlock(self._wlockref)
3034 return self._currentlock(self._wlockref)
3020
3035
3021 def checkcommitpatterns(self, wctx, match, status, fail):
3036 def checkcommitpatterns(self, wctx, match, status, fail):
3022 """check for commit arguments that aren't committable"""
3037 """check for commit arguments that aren't committable"""
3023 if match.isexact() or match.prefix():
3038 if match.isexact() or match.prefix():
3024 matched = set(status.modified + status.added + status.removed)
3039 matched = set(status.modified + status.added + status.removed)
3025
3040
3026 for f in match.files():
3041 for f in match.files():
3027 f = self.dirstate.normalize(f)
3042 f = self.dirstate.normalize(f)
3028 if f == b'.' or f in matched or f in wctx.substate:
3043 if f == b'.' or f in matched or f in wctx.substate:
3029 continue
3044 continue
3030 if f in status.deleted:
3045 if f in status.deleted:
3031 fail(f, _(b'file not found!'))
3046 fail(f, _(b'file not found!'))
3032 # Is it a directory that exists or used to exist?
3047 # Is it a directory that exists or used to exist?
3033 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3048 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3034 d = f + b'/'
3049 d = f + b'/'
3035 for mf in matched:
3050 for mf in matched:
3036 if mf.startswith(d):
3051 if mf.startswith(d):
3037 break
3052 break
3038 else:
3053 else:
3039 fail(f, _(b"no match under directory!"))
3054 fail(f, _(b"no match under directory!"))
3040 elif f not in self.dirstate:
3055 elif f not in self.dirstate:
3041 fail(f, _(b"file not tracked!"))
3056 fail(f, _(b"file not tracked!"))
3042
3057
3043 @unfilteredmethod
3058 @unfilteredmethod
3044 def commit(
3059 def commit(
3045 self,
3060 self,
3046 text=b"",
3061 text=b"",
3047 user=None,
3062 user=None,
3048 date=None,
3063 date=None,
3049 match=None,
3064 match=None,
3050 force=False,
3065 force=False,
3051 editor=None,
3066 editor=None,
3052 extra=None,
3067 extra=None,
3053 ):
3068 ):
3054 """Add a new revision to current repository.
3069 """Add a new revision to current repository.
3055
3070
3056 Revision information is gathered from the working directory,
3071 Revision information is gathered from the working directory,
3057 match can be used to filter the committed files. If editor is
3072 match can be used to filter the committed files. If editor is
3058 supplied, it is called to get a commit message.
3073 supplied, it is called to get a commit message.
3059 """
3074 """
3060 if extra is None:
3075 if extra is None:
3061 extra = {}
3076 extra = {}
3062
3077
3063 def fail(f, msg):
3078 def fail(f, msg):
3064 raise error.InputError(b'%s: %s' % (f, msg))
3079 raise error.InputError(b'%s: %s' % (f, msg))
3065
3080
3066 if not match:
3081 if not match:
3067 match = matchmod.always()
3082 match = matchmod.always()
3068
3083
3069 if not force:
3084 if not force:
3070 match.bad = fail
3085 match.bad = fail
3071
3086
3072 # lock() for recent changelog (see issue4368)
3087 # lock() for recent changelog (see issue4368)
3073 with self.wlock(), self.lock():
3088 with self.wlock(), self.lock():
3074 wctx = self[None]
3089 wctx = self[None]
3075 merge = len(wctx.parents()) > 1
3090 merge = len(wctx.parents()) > 1
3076
3091
3077 if not force and merge and not match.always():
3092 if not force and merge and not match.always():
3078 raise error.Abort(
3093 raise error.Abort(
3079 _(
3094 _(
3080 b'cannot partially commit a merge '
3095 b'cannot partially commit a merge '
3081 b'(do not specify files or patterns)'
3096 b'(do not specify files or patterns)'
3082 )
3097 )
3083 )
3098 )
3084
3099
3085 status = self.status(match=match, clean=force)
3100 status = self.status(match=match, clean=force)
3086 if force:
3101 if force:
3087 status.modified.extend(
3102 status.modified.extend(
3088 status.clean
3103 status.clean
3089 ) # mq may commit clean files
3104 ) # mq may commit clean files
3090
3105
3091 # check subrepos
3106 # check subrepos
3092 subs, commitsubs, newstate = subrepoutil.precommit(
3107 subs, commitsubs, newstate = subrepoutil.precommit(
3093 self.ui, wctx, status, match, force=force
3108 self.ui, wctx, status, match, force=force
3094 )
3109 )
3095
3110
3096 # make sure all explicit patterns are matched
3111 # make sure all explicit patterns are matched
3097 if not force:
3112 if not force:
3098 self.checkcommitpatterns(wctx, match, status, fail)
3113 self.checkcommitpatterns(wctx, match, status, fail)
3099
3114
3100 cctx = context.workingcommitctx(
3115 cctx = context.workingcommitctx(
3101 self, status, text, user, date, extra
3116 self, status, text, user, date, extra
3102 )
3117 )
3103
3118
3104 ms = mergestatemod.mergestate.read(self)
3119 ms = mergestatemod.mergestate.read(self)
3105 mergeutil.checkunresolved(ms)
3120 mergeutil.checkunresolved(ms)
3106
3121
3107 # internal config: ui.allowemptycommit
3122 # internal config: ui.allowemptycommit
3108 if cctx.isempty() and not self.ui.configbool(
3123 if cctx.isempty() and not self.ui.configbool(
3109 b'ui', b'allowemptycommit'
3124 b'ui', b'allowemptycommit'
3110 ):
3125 ):
3111 self.ui.debug(b'nothing to commit, clearing merge state\n')
3126 self.ui.debug(b'nothing to commit, clearing merge state\n')
3112 ms.reset()
3127 ms.reset()
3113 return None
3128 return None
3114
3129
3115 if merge and cctx.deleted():
3130 if merge and cctx.deleted():
3116 raise error.Abort(_(b"cannot commit merge with missing files"))
3131 raise error.Abort(_(b"cannot commit merge with missing files"))
3117
3132
3118 if editor:
3133 if editor:
3119 cctx._text = editor(self, cctx, subs)
3134 cctx._text = editor(self, cctx, subs)
3120 edited = text != cctx._text
3135 edited = text != cctx._text
3121
3136
3122 # Save commit message in case this transaction gets rolled back
3137 # Save commit message in case this transaction gets rolled back
3123 # (e.g. by a pretxncommit hook). Leave the content alone on
3138 # (e.g. by a pretxncommit hook). Leave the content alone on
3124 # the assumption that the user will use the same editor again.
3139 # the assumption that the user will use the same editor again.
3125 msgfn = self.savecommitmessage(cctx._text)
3140 msgfn = self.savecommitmessage(cctx._text)
3126
3141
3127 # commit subs and write new state
3142 # commit subs and write new state
3128 if subs:
3143 if subs:
3129 uipathfn = scmutil.getuipathfn(self)
3144 uipathfn = scmutil.getuipathfn(self)
3130 for s in sorted(commitsubs):
3145 for s in sorted(commitsubs):
3131 sub = wctx.sub(s)
3146 sub = wctx.sub(s)
3132 self.ui.status(
3147 self.ui.status(
3133 _(b'committing subrepository %s\n')
3148 _(b'committing subrepository %s\n')
3134 % uipathfn(subrepoutil.subrelpath(sub))
3149 % uipathfn(subrepoutil.subrelpath(sub))
3135 )
3150 )
3136 sr = sub.commit(cctx._text, user, date)
3151 sr = sub.commit(cctx._text, user, date)
3137 newstate[s] = (newstate[s][0], sr)
3152 newstate[s] = (newstate[s][0], sr)
3138 subrepoutil.writestate(self, newstate)
3153 subrepoutil.writestate(self, newstate)
3139
3154
3140 p1, p2 = self.dirstate.parents()
3155 p1, p2 = self.dirstate.parents()
3141 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3156 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3142 try:
3157 try:
3143 self.hook(
3158 self.hook(
3144 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3159 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3145 )
3160 )
3146 with self.transaction(b'commit'):
3161 with self.transaction(b'commit'):
3147 ret = self.commitctx(cctx, True)
3162 ret = self.commitctx(cctx, True)
3148 # update bookmarks, dirstate and mergestate
3163 # update bookmarks, dirstate and mergestate
3149 bookmarks.update(self, [p1, p2], ret)
3164 bookmarks.update(self, [p1, p2], ret)
3150 cctx.markcommitted(ret)
3165 cctx.markcommitted(ret)
3151 ms.reset()
3166 ms.reset()
3152 except: # re-raises
3167 except: # re-raises
3153 if edited:
3168 if edited:
3154 self.ui.write(
3169 self.ui.write(
3155 _(b'note: commit message saved in %s\n') % msgfn
3170 _(b'note: commit message saved in %s\n') % msgfn
3156 )
3171 )
3157 self.ui.write(
3172 self.ui.write(
3158 _(
3173 _(
3159 b"note: use 'hg commit --logfile "
3174 b"note: use 'hg commit --logfile "
3160 b".hg/last-message.txt --edit' to reuse it\n"
3175 b".hg/last-message.txt --edit' to reuse it\n"
3161 )
3176 )
3162 )
3177 )
3163 raise
3178 raise
3164
3179
3165 def commithook(unused_success):
3180 def commithook(unused_success):
3166 # hack for command that use a temporary commit (eg: histedit)
3181 # hack for command that use a temporary commit (eg: histedit)
3167 # temporary commit got stripped before hook release
3182 # temporary commit got stripped before hook release
3168 if self.changelog.hasnode(ret):
3183 if self.changelog.hasnode(ret):
3169 self.hook(
3184 self.hook(
3170 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3185 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3171 )
3186 )
3172
3187
3173 self._afterlock(commithook)
3188 self._afterlock(commithook)
3174 return ret
3189 return ret
3175
3190
3176 @unfilteredmethod
3191 @unfilteredmethod
3177 def commitctx(self, ctx, error=False, origctx=None):
3192 def commitctx(self, ctx, error=False, origctx=None):
3178 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3193 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3179
3194
3180 @unfilteredmethod
3195 @unfilteredmethod
3181 def destroying(self):
3196 def destroying(self):
3182 """Inform the repository that nodes are about to be destroyed.
3197 """Inform the repository that nodes are about to be destroyed.
3183 Intended for use by strip and rollback, so there's a common
3198 Intended for use by strip and rollback, so there's a common
3184 place for anything that has to be done before destroying history.
3199 place for anything that has to be done before destroying history.
3185
3200
3186 This is mostly useful for saving state that is in memory and waiting
3201 This is mostly useful for saving state that is in memory and waiting
3187 to be flushed when the current lock is released. Because a call to
3202 to be flushed when the current lock is released. Because a call to
3188 destroyed is imminent, the repo will be invalidated causing those
3203 destroyed is imminent, the repo will be invalidated causing those
3189 changes to stay in memory (waiting for the next unlock), or vanish
3204 changes to stay in memory (waiting for the next unlock), or vanish
3190 completely.
3205 completely.
3191 """
3206 """
3192 # When using the same lock to commit and strip, the phasecache is left
3207 # When using the same lock to commit and strip, the phasecache is left
3193 # dirty after committing. Then when we strip, the repo is invalidated,
3208 # dirty after committing. Then when we strip, the repo is invalidated,
3194 # causing those changes to disappear.
3209 # causing those changes to disappear.
3195 if '_phasecache' in vars(self):
3210 if '_phasecache' in vars(self):
3196 self._phasecache.write()
3211 self._phasecache.write()
3197
3212
3198 @unfilteredmethod
3213 @unfilteredmethod
3199 def destroyed(self):
3214 def destroyed(self):
3200 """Inform the repository that nodes have been destroyed.
3215 """Inform the repository that nodes have been destroyed.
3201 Intended for use by strip and rollback, so there's a common
3216 Intended for use by strip and rollback, so there's a common
3202 place for anything that has to be done after destroying history.
3217 place for anything that has to be done after destroying history.
3203 """
3218 """
3204 # When one tries to:
3219 # When one tries to:
3205 # 1) destroy nodes thus calling this method (e.g. strip)
3220 # 1) destroy nodes thus calling this method (e.g. strip)
3206 # 2) use phasecache somewhere (e.g. commit)
3221 # 2) use phasecache somewhere (e.g. commit)
3207 #
3222 #
3208 # then 2) will fail because the phasecache contains nodes that were
3223 # then 2) will fail because the phasecache contains nodes that were
3209 # removed. We can either remove phasecache from the filecache,
3224 # removed. We can either remove phasecache from the filecache,
3210 # causing it to reload next time it is accessed, or simply filter
3225 # causing it to reload next time it is accessed, or simply filter
3211 # the removed nodes now and write the updated cache.
3226 # the removed nodes now and write the updated cache.
3212 self._phasecache.filterunknown(self)
3227 self._phasecache.filterunknown(self)
3213 self._phasecache.write()
3228 self._phasecache.write()
3214
3229
3215 # refresh all repository caches
3230 # refresh all repository caches
3216 self.updatecaches()
3231 self.updatecaches()
3217
3232
3218 # Ensure the persistent tag cache is updated. Doing it now
3233 # Ensure the persistent tag cache is updated. Doing it now
3219 # means that the tag cache only has to worry about destroyed
3234 # means that the tag cache only has to worry about destroyed
3220 # heads immediately after a strip/rollback. That in turn
3235 # heads immediately after a strip/rollback. That in turn
3221 # guarantees that "cachetip == currenttip" (comparing both rev
3236 # guarantees that "cachetip == currenttip" (comparing both rev
3222 # and node) always means no nodes have been added or destroyed.
3237 # and node) always means no nodes have been added or destroyed.
3223
3238
3224 # XXX this is suboptimal when qrefresh'ing: we strip the current
3239 # XXX this is suboptimal when qrefresh'ing: we strip the current
3225 # head, refresh the tag cache, then immediately add a new head.
3240 # head, refresh the tag cache, then immediately add a new head.
3226 # But I think doing it this way is necessary for the "instant
3241 # But I think doing it this way is necessary for the "instant
3227 # tag cache retrieval" case to work.
3242 # tag cache retrieval" case to work.
3228 self.invalidate()
3243 self.invalidate()
3229
3244
3230 def status(
3245 def status(
3231 self,
3246 self,
3232 node1=b'.',
3247 node1=b'.',
3233 node2=None,
3248 node2=None,
3234 match=None,
3249 match=None,
3235 ignored=False,
3250 ignored=False,
3236 clean=False,
3251 clean=False,
3237 unknown=False,
3252 unknown=False,
3238 listsubrepos=False,
3253 listsubrepos=False,
3239 ):
3254 ):
3240 '''a convenience method that calls node1.status(node2)'''
3255 '''a convenience method that calls node1.status(node2)'''
3241 return self[node1].status(
3256 return self[node1].status(
3242 node2, match, ignored, clean, unknown, listsubrepos
3257 node2, match, ignored, clean, unknown, listsubrepos
3243 )
3258 )
3244
3259
3245 def addpostdsstatus(self, ps):
3260 def addpostdsstatus(self, ps):
3246 """Add a callback to run within the wlock, at the point at which status
3261 """Add a callback to run within the wlock, at the point at which status
3247 fixups happen.
3262 fixups happen.
3248
3263
3249 On status completion, callback(wctx, status) will be called with the
3264 On status completion, callback(wctx, status) will be called with the
3250 wlock held, unless the dirstate has changed from underneath or the wlock
3265 wlock held, unless the dirstate has changed from underneath or the wlock
3251 couldn't be grabbed.
3266 couldn't be grabbed.
3252
3267
3253 Callbacks should not capture and use a cached copy of the dirstate --
3268 Callbacks should not capture and use a cached copy of the dirstate --
3254 it might change in the meanwhile. Instead, they should access the
3269 it might change in the meanwhile. Instead, they should access the
3255 dirstate via wctx.repo().dirstate.
3270 dirstate via wctx.repo().dirstate.
3256
3271
3257 This list is emptied out after each status run -- extensions should
3272 This list is emptied out after each status run -- extensions should
3258 make sure it adds to this list each time dirstate.status is called.
3273 make sure it adds to this list each time dirstate.status is called.
3259 Extensions should also make sure they don't call this for statuses
3274 Extensions should also make sure they don't call this for statuses
3260 that don't involve the dirstate.
3275 that don't involve the dirstate.
3261 """
3276 """
3262
3277
3263 # The list is located here for uniqueness reasons -- it is actually
3278 # The list is located here for uniqueness reasons -- it is actually
3264 # managed by the workingctx, but that isn't unique per-repo.
3279 # managed by the workingctx, but that isn't unique per-repo.
3265 self._postdsstatus.append(ps)
3280 self._postdsstatus.append(ps)
3266
3281
3267 def postdsstatus(self):
3282 def postdsstatus(self):
3268 """Used by workingctx to get the list of post-dirstate-status hooks."""
3283 """Used by workingctx to get the list of post-dirstate-status hooks."""
3269 return self._postdsstatus
3284 return self._postdsstatus
3270
3285
3271 def clearpostdsstatus(self):
3286 def clearpostdsstatus(self):
3272 """Used by workingctx to clear post-dirstate-status hooks."""
3287 """Used by workingctx to clear post-dirstate-status hooks."""
3273 del self._postdsstatus[:]
3288 del self._postdsstatus[:]
3274
3289
3275 def heads(self, start=None):
3290 def heads(self, start=None):
3276 if start is None:
3291 if start is None:
3277 cl = self.changelog
3292 cl = self.changelog
3278 headrevs = reversed(cl.headrevs())
3293 headrevs = reversed(cl.headrevs())
3279 return [cl.node(rev) for rev in headrevs]
3294 return [cl.node(rev) for rev in headrevs]
3280
3295
3281 heads = self.changelog.heads(start)
3296 heads = self.changelog.heads(start)
3282 # sort the output in rev descending order
3297 # sort the output in rev descending order
3283 return sorted(heads, key=self.changelog.rev, reverse=True)
3298 return sorted(heads, key=self.changelog.rev, reverse=True)
3284
3299
3285 def branchheads(self, branch=None, start=None, closed=False):
3300 def branchheads(self, branch=None, start=None, closed=False):
3286 """return a (possibly filtered) list of heads for the given branch
3301 """return a (possibly filtered) list of heads for the given branch
3287
3302
3288 Heads are returned in topological order, from newest to oldest.
3303 Heads are returned in topological order, from newest to oldest.
3289 If branch is None, use the dirstate branch.
3304 If branch is None, use the dirstate branch.
3290 If start is not None, return only heads reachable from start.
3305 If start is not None, return only heads reachable from start.
3291 If closed is True, return heads that are marked as closed as well.
3306 If closed is True, return heads that are marked as closed as well.
3292 """
3307 """
3293 if branch is None:
3308 if branch is None:
3294 branch = self[None].branch()
3309 branch = self[None].branch()
3295 branches = self.branchmap()
3310 branches = self.branchmap()
3296 if not branches.hasbranch(branch):
3311 if not branches.hasbranch(branch):
3297 return []
3312 return []
3298 # the cache returns heads ordered lowest to highest
3313 # the cache returns heads ordered lowest to highest
3299 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3314 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3300 if start is not None:
3315 if start is not None:
3301 # filter out the heads that cannot be reached from startrev
3316 # filter out the heads that cannot be reached from startrev
3302 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3317 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3303 bheads = [h for h in bheads if h in fbheads]
3318 bheads = [h for h in bheads if h in fbheads]
3304 return bheads
3319 return bheads
3305
3320
3306 def branches(self, nodes):
3321 def branches(self, nodes):
3307 if not nodes:
3322 if not nodes:
3308 nodes = [self.changelog.tip()]
3323 nodes = [self.changelog.tip()]
3309 b = []
3324 b = []
3310 for n in nodes:
3325 for n in nodes:
3311 t = n
3326 t = n
3312 while True:
3327 while True:
3313 p = self.changelog.parents(n)
3328 p = self.changelog.parents(n)
3314 if p[1] != self.nullid or p[0] == self.nullid:
3329 if p[1] != self.nullid or p[0] == self.nullid:
3315 b.append((t, n, p[0], p[1]))
3330 b.append((t, n, p[0], p[1]))
3316 break
3331 break
3317 n = p[0]
3332 n = p[0]
3318 return b
3333 return b
3319
3334
3320 def between(self, pairs):
3335 def between(self, pairs):
3321 r = []
3336 r = []
3322
3337
3323 for top, bottom in pairs:
3338 for top, bottom in pairs:
3324 n, l, i = top, [], 0
3339 n, l, i = top, [], 0
3325 f = 1
3340 f = 1
3326
3341
3327 while n != bottom and n != self.nullid:
3342 while n != bottom and n != self.nullid:
3328 p = self.changelog.parents(n)[0]
3343 p = self.changelog.parents(n)[0]
3329 if i == f:
3344 if i == f:
3330 l.append(n)
3345 l.append(n)
3331 f = f * 2
3346 f = f * 2
3332 n = p
3347 n = p
3333 i += 1
3348 i += 1
3334
3349
3335 r.append(l)
3350 r.append(l)
3336
3351
3337 return r
3352 return r
3338
3353
3339 def checkpush(self, pushop):
3354 def checkpush(self, pushop):
3340 """Extensions can override this function if additional checks have
3355 """Extensions can override this function if additional checks have
3341 to be performed before pushing, or call it if they override push
3356 to be performed before pushing, or call it if they override push
3342 command.
3357 command.
3343 """
3358 """
3344
3359
3345 @unfilteredpropertycache
3360 @unfilteredpropertycache
3346 def prepushoutgoinghooks(self):
3361 def prepushoutgoinghooks(self):
3347 """Return util.hooks consists of a pushop with repo, remote, outgoing
3362 """Return util.hooks consists of a pushop with repo, remote, outgoing
3348 methods, which are called before pushing changesets.
3363 methods, which are called before pushing changesets.
3349 """
3364 """
3350 return util.hooks()
3365 return util.hooks()
3351
3366
3352 def pushkey(self, namespace, key, old, new):
3367 def pushkey(self, namespace, key, old, new):
3353 try:
3368 try:
3354 tr = self.currenttransaction()
3369 tr = self.currenttransaction()
3355 hookargs = {}
3370 hookargs = {}
3356 if tr is not None:
3371 if tr is not None:
3357 hookargs.update(tr.hookargs)
3372 hookargs.update(tr.hookargs)
3358 hookargs = pycompat.strkwargs(hookargs)
3373 hookargs = pycompat.strkwargs(hookargs)
3359 hookargs['namespace'] = namespace
3374 hookargs['namespace'] = namespace
3360 hookargs['key'] = key
3375 hookargs['key'] = key
3361 hookargs['old'] = old
3376 hookargs['old'] = old
3362 hookargs['new'] = new
3377 hookargs['new'] = new
3363 self.hook(b'prepushkey', throw=True, **hookargs)
3378 self.hook(b'prepushkey', throw=True, **hookargs)
3364 except error.HookAbort as exc:
3379 except error.HookAbort as exc:
3365 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3380 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3366 if exc.hint:
3381 if exc.hint:
3367 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3382 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3368 return False
3383 return False
3369 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3384 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3370 ret = pushkey.push(self, namespace, key, old, new)
3385 ret = pushkey.push(self, namespace, key, old, new)
3371
3386
3372 def runhook(unused_success):
3387 def runhook(unused_success):
3373 self.hook(
3388 self.hook(
3374 b'pushkey',
3389 b'pushkey',
3375 namespace=namespace,
3390 namespace=namespace,
3376 key=key,
3391 key=key,
3377 old=old,
3392 old=old,
3378 new=new,
3393 new=new,
3379 ret=ret,
3394 ret=ret,
3380 )
3395 )
3381
3396
3382 self._afterlock(runhook)
3397 self._afterlock(runhook)
3383 return ret
3398 return ret
3384
3399
3385 def listkeys(self, namespace):
3400 def listkeys(self, namespace):
3386 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3401 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3387 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3402 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3388 values = pushkey.list(self, namespace)
3403 values = pushkey.list(self, namespace)
3389 self.hook(b'listkeys', namespace=namespace, values=values)
3404 self.hook(b'listkeys', namespace=namespace, values=values)
3390 return values
3405 return values
3391
3406
3392 def debugwireargs(self, one, two, three=None, four=None, five=None):
3407 def debugwireargs(self, one, two, three=None, four=None, five=None):
3393 '''used to test argument passing over the wire'''
3408 '''used to test argument passing over the wire'''
3394 return b"%s %s %s %s %s" % (
3409 return b"%s %s %s %s %s" % (
3395 one,
3410 one,
3396 two,
3411 two,
3397 pycompat.bytestr(three),
3412 pycompat.bytestr(three),
3398 pycompat.bytestr(four),
3413 pycompat.bytestr(four),
3399 pycompat.bytestr(five),
3414 pycompat.bytestr(five),
3400 )
3415 )
3401
3416
3402 def savecommitmessage(self, text):
3417 def savecommitmessage(self, text):
3403 fp = self.vfs(b'last-message.txt', b'wb')
3418 fp = self.vfs(b'last-message.txt', b'wb')
3404 try:
3419 try:
3405 fp.write(text)
3420 fp.write(text)
3406 finally:
3421 finally:
3407 fp.close()
3422 fp.close()
3408 return self.pathto(fp.name[len(self.root) + 1 :])
3423 return self.pathto(fp.name[len(self.root) + 1 :])
3409
3424
3410 def register_wanted_sidedata(self, category):
3425 def register_wanted_sidedata(self, category):
3411 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3426 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3412 # Only revlogv2 repos can want sidedata.
3427 # Only revlogv2 repos can want sidedata.
3413 return
3428 return
3414 self._wanted_sidedata.add(pycompat.bytestr(category))
3429 self._wanted_sidedata.add(pycompat.bytestr(category))
3415
3430
3416 def register_sidedata_computer(
3431 def register_sidedata_computer(
3417 self, kind, category, keys, computer, flags, replace=False
3432 self, kind, category, keys, computer, flags, replace=False
3418 ):
3433 ):
3419 if kind not in revlogconst.ALL_KINDS:
3434 if kind not in revlogconst.ALL_KINDS:
3420 msg = _(b"unexpected revlog kind '%s'.")
3435 msg = _(b"unexpected revlog kind '%s'.")
3421 raise error.ProgrammingError(msg % kind)
3436 raise error.ProgrammingError(msg % kind)
3422 category = pycompat.bytestr(category)
3437 category = pycompat.bytestr(category)
3423 already_registered = category in self._sidedata_computers.get(kind, [])
3438 already_registered = category in self._sidedata_computers.get(kind, [])
3424 if already_registered and not replace:
3439 if already_registered and not replace:
3425 msg = _(
3440 msg = _(
3426 b"cannot register a sidedata computer twice for category '%s'."
3441 b"cannot register a sidedata computer twice for category '%s'."
3427 )
3442 )
3428 raise error.ProgrammingError(msg % category)
3443 raise error.ProgrammingError(msg % category)
3429 if replace and not already_registered:
3444 if replace and not already_registered:
3430 msg = _(
3445 msg = _(
3431 b"cannot replace a sidedata computer that isn't registered "
3446 b"cannot replace a sidedata computer that isn't registered "
3432 b"for category '%s'."
3447 b"for category '%s'."
3433 )
3448 )
3434 raise error.ProgrammingError(msg % category)
3449 raise error.ProgrammingError(msg % category)
3435 self._sidedata_computers.setdefault(kind, {})
3450 self._sidedata_computers.setdefault(kind, {})
3436 self._sidedata_computers[kind][category] = (keys, computer, flags)
3451 self._sidedata_computers[kind][category] = (keys, computer, flags)
3437
3452
3438
3453
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames ``files`` after the transaction.

    ``files`` is an iterable of ``(vfs, src, dest)`` triples.  The list is
    snapshotted eagerly so the returned closure holds no reference back to
    the transaction object.
    """
    pending = [tuple(entry) for entry in files]

    def renameall():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError as exc:  # journal file does not yet exist
                if exc.errno != errno.ENOENT:
                    raise

    return renameall
3456
3471
3457
3472
def undoname(fn):
    """Map a journal file path onto the matching undo file path.

    ``.../journal.foo`` becomes ``.../undo.foo``; only the leading
    ``journal`` prefix of the basename is rewritten.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith(b'journal')
    undobase = b'undo' + basename[len(b'journal') :]
    return os.path.join(directory, undobase)
3462
3477
3463
3478
def instance(ui, path, create, intents=None, createopts=None):
    """Return a local repository object rooted at ``path``.

    When ``create`` is true, a fresh repository is created on disk first
    (honoring ``createopts``); ``intents`` is forwarded to the repository
    constructor.
    """
    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)
    return makelocalrepository(ui, localpath, intents=intents)
3470
3485
3471
3486
def islocal(path):
    """Report whether ``path`` denotes a local repository (always true here)."""
    return True
3474
3489
3475
3490
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.  The input mapping is never
    mutated; a fresh dict is returned.
    """
    opts = dict(createopts) if createopts else {}

    if b'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return opts
3489
3504
3490
3505
def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depends on the configuration
    """
    createopts = defaultcreateopts(ui, createopts=createopts)
    fresh = newreporequirements(ui, createopts)
    # working-copy requirements come from the *new* repository's config...
    target_requirements = {
        r for r in fresh if r in requirementsmod.WORKING_DIR_REQUIREMENTS
    }
    # ...while store requirements are copied verbatim from the source.
    target_requirements.update(
        r
        for r in srcrepo.requirements
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
    )
    return target_requirements
3507
3522
3508
3523
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.

    ``createopts`` is the (already defaulted) creation-options dict;
    ``ui`` supplies the ``format.*`` / ``experimental.*`` configuration
    that drives most of the decisions below.  Returns a set of
    requirement strings.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

        return requirements

    # A missing 'backend' key means defaultcreateopts() was skipped, which
    # is a caller bug rather than a user error.
    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    # Only the historical revlogv1 backend knows how to express its
    # requirements here; anything else must be handled by an extension
    # wrapping this function.
    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    # fncache and dotencode only make sense when a store (and fncache,
    # respectively) is in use, hence the nesting.
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    # Pick the first configured compression engine that is actually usable
    # for revlogs; abort if none of the configured ones qualifies.
    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'exp-dirstate-v2'):
        if dirstate.SUPPORTS_DIRSTATE_V2:
            requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
        else:
            raise error.Abort(
                _(
                    b"dirstate v2 format requested by config "
                    b"but not supported (requires Rust extensions)"
                )
            )

    # experimental config: format.exp-use-copies-side-data-changeset
    # Copy tracing in sidedata requires the changelog-v2 format as well.
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    # The scary config value is deliberate: these formats are unstable.
    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        # revlogv2 replaces (not augments) revlogv1
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return requirements
3629
3644
3630
3645
def checkrequirementscompat(ui, requirements):
    """Check compatibility of enabled and disabled repository requirements.

    Returns the set of requirements which need to be dropped because the
    requirements they depend on are not enabled; a warning is issued for
    each dropped requirement.

    Raises ``error.Abort`` when a shared repository is requested while the
    store is disabled, since sharing relies on the store existing.
    """

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        # bookmarks-in-store keeps bookmarks inside the store; without a
        # store the requirement cannot be honored.
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    # fix: message previously misspelled "beacuse"
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        # share-safe writes requirements into the store, so it also
        # depends on the store being enabled.
        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
            )
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped
3672
3687
3673
3688
def filterknowncreateopts(ui, createopts):
    """Return the subset of ``createopts`` that this code cannot handle.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    recognized = frozenset(
        (
            b'backend',
            b'lfs',
            b'narrowfiles',
            b'sharedrepo',
            b'sharedrelative',
            b'shareditems',
            b'shallowfilestore',
        )
    )

    unknown = {}
    for key, value in createopts.items():
        if key not in recognized:
            unknown[key] = value
    return unknown
3699
3714
3700
3715
def createrepository(ui, path, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirement`` predefined set of requirements.
                    (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).

    Raises ``error.RepoError`` if a repository already exists at ``path``
    and ``error.Abort`` on unknown creation options.
    """

    # Either take the requirements verbatim, or derive them from the
    # (validated) creation options -- never both.
    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    # Resolve the shared-store pointer before touching the disk so a
    # failure here leaves nothing behind.
    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    # Shared repositories use the parent's caches and store.
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3825
3840
3826
3841
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Let the instance release its resources first.
    repo.close()

    # Swap the object's class for one on which every attribute lookup
    # fails.  close() alone must keep working, because some repo
    # constructors call close() on references they hold.
    class poisonedrepository(object):
        def close(self):
            pass

        def __getattribute__(self, name):
            if name == 'close':
                return object.__getattribute__(self, name)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

    # A repoview may intercept __setattr__, so assign the class at the
    # lowest possible level.
    object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,1297 +1,1295 b''
1 ===================================
1 ===================================
2 Test the persistent on-disk nodemap
2 Test the persistent on-disk nodemap
3 ===================================
3 ===================================
4
4
5
5
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [format]
7 > [format]
8 > use-share-safe=yes
8 > use-share-safe=yes
9 > [extensions]
9 > [extensions]
10 > share=
10 > share=
11 > EOF
11 > EOF
12
12
13 #if no-rust
13 #if no-rust
14
14
15 $ cat << EOF >> $HGRCPATH
15 $ cat << EOF >> $HGRCPATH
16 > [format]
16 > [format]
17 > use-persistent-nodemap=yes
17 > use-persistent-nodemap=yes
18 > [devel]
18 > [devel]
19 > persistent-nodemap=yes
19 > persistent-nodemap=yes
20 > EOF
20 > EOF
21
21
22 #endif
22 #endif
23
23
24 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
24 $ hg init test-repo --config storage.revlog.persistent-nodemap.slow-path=allow
25 $ cd test-repo
25 $ cd test-repo
26
26
27 Check handling of the default slow-path value
27 Check handling of the default slow-path value
28
28
29 #if no-pure no-rust
29 #if no-pure no-rust
30
30
31 $ hg id
31 $ hg id
32 abort: accessing `persistent-nodemap` repository without associated fast implementation.
32 abort: accessing `persistent-nodemap` repository without associated fast implementation.
33 (check `hg help config.format.use-persistent-nodemap` for details)
33 (check `hg help config.format.use-persistent-nodemap` for details)
34 [255]
34 [255]
35
35
36 Unlock further check (we are here to test the feature)
36 Unlock further check (we are here to test the feature)
37
37
38 $ cat << EOF >> $HGRCPATH
38 $ cat << EOF >> $HGRCPATH
39 > [storage]
39 > [storage]
40 > # to avoid spamming the test
40 > # to avoid spamming the test
41 > revlog.persistent-nodemap.slow-path=allow
41 > revlog.persistent-nodemap.slow-path=allow
42 > EOF
42 > EOF
43
43
44 #endif
44 #endif
45
45
46 #if rust
46 #if rust
47
47
48 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
48 Regression test for a previous bug in Rust/C FFI for the `Revlog_CAPI` capsule:
49 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
49 in places where `mercurial/cext/revlog.c` function signatures use `Py_ssize_t`
50 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
50 (64 bits on Linux x86_64), corresponding declarations in `rust/hg-cpython/src/cindex.rs`
51 incorrectly used `libc::c_int` (32 bits).
51 incorrectly used `libc::c_int` (32 bits).
52 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
52 As a result, -1 passed from Rust for the null revision became 4294967295 in C.
53
53
54 $ hg log -r 00000000
54 $ hg log -r 00000000
55 changeset: -1:000000000000
55 changeset: -1:000000000000
56 tag: tip
56 tag: tip
57 user:
57 user:
58 date: Thu Jan 01 00:00:00 1970 +0000
58 date: Thu Jan 01 00:00:00 1970 +0000
59
59
60
60
61 #endif
61 #endif
62
62
63
63
64 $ hg debugformat
64 $ hg debugformat
65 format-variant repo
65 format-variant repo
66 fncache: yes
66 fncache: yes
67 dirstate-v2: no
67 dirstate-v2: no
68 dotencode: yes
68 dotencode: yes
69 generaldelta: yes
69 generaldelta: yes
70 share-safe: yes
70 share-safe: yes
71 sparserevlog: yes
71 sparserevlog: yes
72 persistent-nodemap: yes
72 persistent-nodemap: yes
73 copies-sdc: no
73 copies-sdc: no
74 revlog-v2: no
74 revlog-v2: no
75 changelog-v2: no
75 changelog-v2: no
76 plain-cl-delta: yes
76 plain-cl-delta: yes
77 compression: zlib (no-zstd !)
77 compression: zlib (no-zstd !)
78 compression: zstd (zstd !)
78 compression: zstd (zstd !)
79 compression-level: default
79 compression-level: default
80 $ hg debugbuilddag .+5000 --new-file
80 $ hg debugbuilddag .+5000 --new-file
81
81
82 $ hg debugnodemap --metadata
82 $ hg debugnodemap --metadata
83 uid: ???????? (glob)
83 uid: ???????? (glob)
84 tip-rev: 5000
84 tip-rev: 5000
85 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
85 tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
86 data-length: 121088
86 data-length: 121088
87 data-unused: 0
87 data-unused: 0
88 data-unused: 0.000%
88 data-unused: 0.000%
89 $ f --size .hg/store/00changelog.n
89 $ f --size .hg/store/00changelog.n
90 .hg/store/00changelog.n: size=62
90 .hg/store/00changelog.n: size=62
91
91
92 Simple lookup works
92 Simple lookup works
93
93
94 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
94 $ ANYNODE=`hg log --template '{node|short}\n' --rev tip`
95 $ hg log -r "$ANYNODE" --template '{rev}\n'
95 $ hg log -r "$ANYNODE" --template '{rev}\n'
96 5000
96 5000
97
97
98
98
99 #if rust
99 #if rust
100
100
101 $ f --sha256 .hg/store/00changelog-*.nd
101 $ f --sha256 .hg/store/00changelog-*.nd
102 .hg/store/00changelog-????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
102 .hg/store/00changelog-????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
103
103
104 $ f --sha256 .hg/store/00manifest-*.nd
104 $ f --sha256 .hg/store/00manifest-*.nd
105 .hg/store/00manifest-????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
105 .hg/store/00manifest-????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
106 $ hg debugnodemap --dump-new | f --sha256 --size
106 $ hg debugnodemap --dump-new | f --sha256 --size
107 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
107 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
108 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
108 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
109 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
109 size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
110 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
110 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
111 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
111 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
112 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
112 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
113 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
113 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
114 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
114 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
115 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
115 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
116 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
116 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
117 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
117 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
118 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
118 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
119 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
119 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
120 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
120 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
121 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
121 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
122 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
122 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
123 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
123 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
124 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
124 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
125 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
125 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
126
126
127
127
128 #else
128 #else
129
129
130 $ f --sha256 .hg/store/00changelog-*.nd
130 $ f --sha256 .hg/store/00changelog-*.nd
131 .hg/store/00changelog-????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
131 .hg/store/00changelog-????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
132 $ hg debugnodemap --dump-new | f --sha256 --size
132 $ hg debugnodemap --dump-new | f --sha256 --size
133 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
133 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
134 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
134 $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
135 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
135 size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
136 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
136 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
137 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
137 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
138 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
138 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
139 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
139 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
140 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
140 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
141 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
141 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
142 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
142 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
143 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
143 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
144 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
144 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
145 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
145 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
146 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
146 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
147 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
147 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
148 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
148 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
149 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
149 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
150 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
150 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
151 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
151 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
152
152
153 #endif
153 #endif
154
154
155 $ hg debugnodemap --check
155 $ hg debugnodemap --check
156 revision in index: 5001
156 revision in index: 5001
157 revision in nodemap: 5001
157 revision in nodemap: 5001
158
158
159 add a new commit
159 add a new commit
160
160
161 $ hg up
161 $ hg up
162 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
163 $ echo foo > foo
163 $ echo foo > foo
164 $ hg add foo
164 $ hg add foo
165
165
166
166
167 Check slow-path config value handling
167 Check slow-path config value handling
168 -------------------------------------
168 -------------------------------------
169
169
170 #if no-pure no-rust
170 #if no-pure no-rust
171
171
172 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
172 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
173 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
173 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
174 falling back to default value: abort
174 falling back to default value: abort
175 abort: accessing `persistent-nodemap` repository without associated fast implementation.
175 abort: accessing `persistent-nodemap` repository without associated fast implementation.
176 (check `hg help config.format.use-persistent-nodemap` for details)
176 (check `hg help config.format.use-persistent-nodemap` for details)
177 [255]
177 [255]
178
178
179 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
179 $ hg log -r . --config "storage.revlog.persistent-nodemap.slow-path=warn"
180 warning: accessing `persistent-nodemap` repository without associated fast implementation.
180 warning: accessing `persistent-nodemap` repository without associated fast implementation.
181 (check `hg help config.format.use-persistent-nodemap` for details)
181 (check `hg help config.format.use-persistent-nodemap` for details)
182 changeset: 5000:6b02b8c7b966
182 changeset: 5000:6b02b8c7b966
183 tag: tip
183 tag: tip
184 user: debugbuilddag
184 user: debugbuilddag
185 date: Thu Jan 01 01:23:20 1970 +0000
185 date: Thu Jan 01 01:23:20 1970 +0000
186 summary: r5000
186 summary: r5000
187
187
188 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
188 $ hg ci -m 'foo' --config "storage.revlog.persistent-nodemap.slow-path=abort"
189 abort: accessing `persistent-nodemap` repository without associated fast implementation.
189 abort: accessing `persistent-nodemap` repository without associated fast implementation.
190 (check `hg help config.format.use-persistent-nodemap` for details)
190 (check `hg help config.format.use-persistent-nodemap` for details)
191 [255]
191 [255]
192
192
193 #else
193 #else
194
194
195 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
195 $ hg id --config "storage.revlog.persistent-nodemap.slow-path=invalid-value"
196 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
196 unknown value for config "storage.revlog.persistent-nodemap.slow-path": "invalid-value"
197 falling back to default value: abort
197 falling back to default value: abort
198 6b02b8c7b966+ tip
198 6b02b8c7b966+ tip
199
199
200 #endif
200 #endif
201
201
202 $ hg ci -m 'foo'
202 $ hg ci -m 'foo'
203
203
204 #if no-pure no-rust
204 #if no-pure no-rust
205 $ hg debugnodemap --metadata
205 $ hg debugnodemap --metadata
206 uid: ???????? (glob)
206 uid: ???????? (glob)
207 tip-rev: 5001
207 tip-rev: 5001
208 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
208 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
209 data-length: 121088
209 data-length: 121088
210 data-unused: 0
210 data-unused: 0
211 data-unused: 0.000%
211 data-unused: 0.000%
212 #else
212 #else
213 $ hg debugnodemap --metadata
213 $ hg debugnodemap --metadata
214 uid: ???????? (glob)
214 uid: ???????? (glob)
215 tip-rev: 5001
215 tip-rev: 5001
216 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
216 tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
217 data-length: 121344
217 data-length: 121344
218 data-unused: 256
218 data-unused: 256
219 data-unused: 0.211%
219 data-unused: 0.211%
220 #endif
220 #endif
221
221
222 $ f --size .hg/store/00changelog.n
222 $ f --size .hg/store/00changelog.n
223 .hg/store/00changelog.n: size=62
223 .hg/store/00changelog.n: size=62
224
224
225 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
225 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
226
226
227 #if pure
227 #if pure
228 $ f --sha256 .hg/store/00changelog-*.nd --size
228 $ f --sha256 .hg/store/00changelog-*.nd --size
229 .hg/store/00changelog-????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
229 .hg/store/00changelog-????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
230 #endif
230 #endif
231
231
232 #if rust
232 #if rust
233 $ f --sha256 .hg/store/00changelog-*.nd --size
233 $ f --sha256 .hg/store/00changelog-*.nd --size
234 .hg/store/00changelog-????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
234 .hg/store/00changelog-????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
235 #endif
235 #endif
236
236
237 #if no-pure no-rust
237 #if no-pure no-rust
238 $ f --sha256 .hg/store/00changelog-*.nd --size
238 $ f --sha256 .hg/store/00changelog-*.nd --size
239 .hg/store/00changelog-????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
239 .hg/store/00changelog-????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
240 #endif
240 #endif
241
241
242 $ hg debugnodemap --check
242 $ hg debugnodemap --check
243 revision in index: 5002
243 revision in index: 5002
244 revision in nodemap: 5002
244 revision in nodemap: 5002
245
245
246 Test code path without mmap
246 Test code path without mmap
247 ---------------------------
247 ---------------------------
248
248
249 $ echo bar > bar
249 $ echo bar > bar
250 $ hg add bar
250 $ hg add bar
251 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
251 $ hg ci -m 'bar' --config storage.revlog.persistent-nodemap.mmap=no
252
252
253 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
253 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=yes
254 revision in index: 5003
254 revision in index: 5003
255 revision in nodemap: 5003
255 revision in nodemap: 5003
256 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
256 $ hg debugnodemap --check --config storage.revlog.persistent-nodemap.mmap=no
257 revision in index: 5003
257 revision in index: 5003
258 revision in nodemap: 5003
258 revision in nodemap: 5003
259
259
260
260
261 #if pure
261 #if pure
262 $ hg debugnodemap --metadata
262 $ hg debugnodemap --metadata
263 uid: ???????? (glob)
263 uid: ???????? (glob)
264 tip-rev: 5002
264 tip-rev: 5002
265 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
265 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
266 data-length: 121600
266 data-length: 121600
267 data-unused: 512
267 data-unused: 512
268 data-unused: 0.421%
268 data-unused: 0.421%
269 $ f --sha256 .hg/store/00changelog-*.nd --size
269 $ f --sha256 .hg/store/00changelog-*.nd --size
270 .hg/store/00changelog-????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
270 .hg/store/00changelog-????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
271 #endif
271 #endif
272 #if rust
272 #if rust
273 $ hg debugnodemap --metadata
273 $ hg debugnodemap --metadata
274 uid: ???????? (glob)
274 uid: ???????? (glob)
275 tip-rev: 5002
275 tip-rev: 5002
276 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
276 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
277 data-length: 121600
277 data-length: 121600
278 data-unused: 512
278 data-unused: 512
279 data-unused: 0.421%
279 data-unused: 0.421%
280 $ f --sha256 .hg/store/00changelog-*.nd --size
280 $ f --sha256 .hg/store/00changelog-*.nd --size
281 .hg/store/00changelog-????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
281 .hg/store/00changelog-????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
282 #endif
282 #endif
283 #if no-pure no-rust
283 #if no-pure no-rust
284 $ hg debugnodemap --metadata
284 $ hg debugnodemap --metadata
285 uid: ???????? (glob)
285 uid: ???????? (glob)
286 tip-rev: 5002
286 tip-rev: 5002
287 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
287 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
288 data-length: 121088
288 data-length: 121088
289 data-unused: 0
289 data-unused: 0
290 data-unused: 0.000%
290 data-unused: 0.000%
291 $ f --sha256 .hg/store/00changelog-*.nd --size
291 $ f --sha256 .hg/store/00changelog-*.nd --size
292 .hg/store/00changelog-????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
292 .hg/store/00changelog-????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
293 #endif
293 #endif
294
294
295 Test force warming the cache
295 Test force warming the cache
296
296
297 $ rm .hg/store/00changelog.n
297 $ rm .hg/store/00changelog.n
298 $ hg debugnodemap --metadata
298 $ hg debugnodemap --metadata
299 $ hg debugupdatecache
299 $ hg debugupdatecache
300 #if pure
300 #if pure
301 $ hg debugnodemap --metadata
301 $ hg debugnodemap --metadata
302 uid: ???????? (glob)
302 uid: ???????? (glob)
303 tip-rev: 5002
303 tip-rev: 5002
304 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
304 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
305 data-length: 121088
305 data-length: 121088
306 data-unused: 0
306 data-unused: 0
307 data-unused: 0.000%
307 data-unused: 0.000%
308 #else
308 #else
309 $ hg debugnodemap --metadata
309 $ hg debugnodemap --metadata
310 uid: ???????? (glob)
310 uid: ???????? (glob)
311 tip-rev: 5002
311 tip-rev: 5002
312 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
312 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
313 data-length: 121088
313 data-length: 121088
314 data-unused: 0
314 data-unused: 0
315 data-unused: 0.000%
315 data-unused: 0.000%
316 #endif
316 #endif
317
317
318 Check out of sync nodemap
318 Check out of sync nodemap
319 =========================
319 =========================
320
320
321 First copy old data on the side.
321 First copy old data on the side.
322
322
323 $ mkdir ../tmp-copies
323 $ mkdir ../tmp-copies
324 $ cp .hg/store/00changelog-????????.nd .hg/store/00changelog.n ../tmp-copies
324 $ cp .hg/store/00changelog-????????.nd .hg/store/00changelog.n ../tmp-copies
325
325
326 Nodemap lagging behind
326 Nodemap lagging behind
327 ----------------------
327 ----------------------
328
328
329 make a new commit
329 make a new commit
330
330
331 $ echo bar2 > bar
331 $ echo bar2 > bar
332 $ hg ci -m 'bar2'
332 $ hg ci -m 'bar2'
333 $ NODE=`hg log -r tip -T '{node}\n'`
333 $ NODE=`hg log -r tip -T '{node}\n'`
334 $ hg log -r "$NODE" -T '{rev}\n'
334 $ hg log -r "$NODE" -T '{rev}\n'
335 5003
335 5003
336
336
337 If the nodemap is lagging behind, it can catch up fine
337 If the nodemap is lagging behind, it can catch up fine
338
338
339 $ hg debugnodemap --metadata
339 $ hg debugnodemap --metadata
340 uid: ???????? (glob)
340 uid: ???????? (glob)
341 tip-rev: 5003
341 tip-rev: 5003
342 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
342 tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
343 data-length: 121344 (pure !)
343 data-length: 121344 (pure !)
344 data-length: 121344 (rust !)
344 data-length: 121344 (rust !)
345 data-length: 121152 (no-rust no-pure !)
345 data-length: 121152 (no-rust no-pure !)
346 data-unused: 192 (pure !)
346 data-unused: 192 (pure !)
347 data-unused: 192 (rust !)
347 data-unused: 192 (rust !)
348 data-unused: 0 (no-rust no-pure !)
348 data-unused: 0 (no-rust no-pure !)
349 data-unused: 0.158% (pure !)
349 data-unused: 0.158% (pure !)
350 data-unused: 0.158% (rust !)
350 data-unused: 0.158% (rust !)
351 data-unused: 0.000% (no-rust no-pure !)
351 data-unused: 0.000% (no-rust no-pure !)
352 $ cp -f ../tmp-copies/* .hg/store/
352 $ cp -f ../tmp-copies/* .hg/store/
353 $ hg debugnodemap --metadata
353 $ hg debugnodemap --metadata
354 uid: ???????? (glob)
354 uid: ???????? (glob)
355 tip-rev: 5002
355 tip-rev: 5002
356 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
356 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
357 data-length: 121088
357 data-length: 121088
358 data-unused: 0
358 data-unused: 0
359 data-unused: 0.000%
359 data-unused: 0.000%
360 $ hg log -r "$NODE" -T '{rev}\n'
360 $ hg log -r "$NODE" -T '{rev}\n'
361 5003
361 5003
362
362
363 changelog altered
363 changelog altered
364 -----------------
364 -----------------
365
365
366 If the nodemap is not gated behind a requirements, an unaware client can alter
366 If the nodemap is not gated behind a requirements, an unaware client can alter
367 the repository so the revlog used to generate the nodemap is not longer
367 the repository so the revlog used to generate the nodemap is not longer
368 compatible with the persistent nodemap. We need to detect that.
368 compatible with the persistent nodemap. We need to detect that.
369
369
370 $ hg up "$NODE~5"
370 $ hg up "$NODE~5"
371 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
371 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
372 $ echo bar > babar
372 $ echo bar > babar
373 $ hg add babar
373 $ hg add babar
374 $ hg ci -m 'babar'
374 $ hg ci -m 'babar'
375 created new head
375 created new head
376 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
376 $ OTHERNODE=`hg log -r tip -T '{node}\n'`
377 $ hg log -r "$OTHERNODE" -T '{rev}\n'
377 $ hg log -r "$OTHERNODE" -T '{rev}\n'
378 5004
378 5004
379
379
380 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
380 $ hg --config extensions.strip= strip --rev "$NODE~1" --no-backup
381
381
382 the nodemap should detect the changelog have been tampered with and recover.
382 the nodemap should detect the changelog have been tampered with and recover.
383
383
384 $ hg debugnodemap --metadata
384 $ hg debugnodemap --metadata
385 uid: ???????? (glob)
385 uid: ???????? (glob)
386 tip-rev: 5002
386 tip-rev: 5002
387 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
387 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
388 data-length: 121536 (pure !)
388 data-length: 121536 (pure !)
389 data-length: 121088 (rust !)
389 data-length: 121088 (rust !)
390 data-length: 121088 (no-pure no-rust !)
390 data-length: 121088 (no-pure no-rust !)
391 data-unused: 448 (pure !)
391 data-unused: 448 (pure !)
392 data-unused: 0 (rust !)
392 data-unused: 0 (rust !)
393 data-unused: 0 (no-pure no-rust !)
393 data-unused: 0 (no-pure no-rust !)
394 data-unused: 0.000% (rust !)
394 data-unused: 0.000% (rust !)
395 data-unused: 0.369% (pure !)
395 data-unused: 0.369% (pure !)
396 data-unused: 0.000% (no-pure no-rust !)
396 data-unused: 0.000% (no-pure no-rust !)
397
397
398 $ cp -f ../tmp-copies/* .hg/store/
398 $ cp -f ../tmp-copies/* .hg/store/
399 $ hg debugnodemap --metadata
399 $ hg debugnodemap --metadata
400 uid: ???????? (glob)
400 uid: ???????? (glob)
401 tip-rev: 5002
401 tip-rev: 5002
402 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
402 tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
403 data-length: 121088
403 data-length: 121088
404 data-unused: 0
404 data-unused: 0
405 data-unused: 0.000%
405 data-unused: 0.000%
406 $ hg log -r "$OTHERNODE" -T '{rev}\n'
406 $ hg log -r "$OTHERNODE" -T '{rev}\n'
407 5002
407 5002
408
408
409 missing data file
409 missing data file
410 -----------------
410 -----------------
411
411
412 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
412 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
413 > sed 's/uid: //'`
413 > sed 's/uid: //'`
414 $ FILE=.hg/store/00changelog-"${UUID}".nd
414 $ FILE=.hg/store/00changelog-"${UUID}".nd
415 $ mv $FILE ../tmp-data-file
415 $ mv $FILE ../tmp-data-file
416 $ cp .hg/store/00changelog.n ../tmp-docket
416 $ cp .hg/store/00changelog.n ../tmp-docket
417
417
418 mercurial don't crash
418 mercurial don't crash
419
419
420 $ hg log -r .
420 $ hg log -r .
421 changeset: 5002:b355ef8adce0
421 changeset: 5002:b355ef8adce0
422 tag: tip
422 tag: tip
423 parent: 4998:d918ad6d18d3
423 parent: 4998:d918ad6d18d3
424 user: test
424 user: test
425 date: Thu Jan 01 00:00:00 1970 +0000
425 date: Thu Jan 01 00:00:00 1970 +0000
426 summary: babar
426 summary: babar
427
427
428 $ hg debugnodemap --metadata
428 $ hg debugnodemap --metadata
429
429
430 $ hg debugupdatecache
430 $ hg debugupdatecache
431 $ hg debugnodemap --metadata
431 $ hg debugnodemap --metadata
432 uid: * (glob)
432 uid: * (glob)
433 tip-rev: 5002
433 tip-rev: 5002
434 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
434 tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
435 data-length: 121088
435 data-length: 121088
436 data-unused: 0
436 data-unused: 0
437 data-unused: 0.000%
437 data-unused: 0.000%
438
438
439 Sub-case: fallback for corrupted data file
439 Sub-case: fallback for corrupted data file
440 ------------------------------------------
440 ------------------------------------------
441
441
442 Sabotaging the data file so that nodemap resolutions fail, triggering fallback to
442 Sabotaging the data file so that nodemap resolutions fail, triggering fallback to
443 (non-persistent) C implementation.
443 (non-persistent) C implementation.
444
444
445
445
446 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
446 $ UUID=`hg debugnodemap --metadata| grep 'uid:' | \
447 > sed 's/uid: //'`
447 > sed 's/uid: //'`
448 $ FILE=.hg/store/00changelog-"${UUID}".nd
448 $ FILE=.hg/store/00changelog-"${UUID}".nd
449 $ python -c "fobj = open('$FILE', 'r+b'); fobj.write(b'\xff' * 121088); fobj.close()"
449 $ python -c "fobj = open('$FILE', 'r+b'); fobj.write(b'\xff' * 121088); fobj.close()"
450
450
451 The nodemap data file is still considered in sync with the docket. This
451 The nodemap data file is still considered in sync with the docket. This
452 would fail without the fallback to the (non-persistent) C implementation:
452 would fail without the fallback to the (non-persistent) C implementation:
453
453
454 $ hg log -r b355ef8adce0949b8bdf6afc72ca853740d65944 -T '{rev}\n' --traceback
454 $ hg log -r b355ef8adce0949b8bdf6afc72ca853740d65944 -T '{rev}\n' --traceback
455 5002
455 5002
456
456
457 The nodemap data file hasn't been fixed, more tests can be inserted:
457 The nodemap data file hasn't been fixed, more tests can be inserted:
458
458
459 $ hg debugnodemap --dump-disk | f --bytes=256 --hexdump --size
459 $ hg debugnodemap --dump-disk | f --bytes=256 --hexdump --size
460 size=121088
460 size=121088
461 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
461 0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
462 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
462 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
463 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
463 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
464 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
464 0030: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
465 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
465 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
466 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
466 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
467 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
467 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
468 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
468 0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
469 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
469 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
470 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
470 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
471 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
471 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
472 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
472 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
473 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
473 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
474 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
474 00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
475 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
475 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
476 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
476 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
477
477
478 $ mv ../tmp-data-file $FILE
478 $ mv ../tmp-data-file $FILE
479 $ mv ../tmp-docket .hg/store/00changelog.n
479 $ mv ../tmp-docket .hg/store/00changelog.n
480
480
481 Check transaction related property
481 Check transaction related property
482 ==================================
482 ==================================
483
483
484 An up to date nodemap should be available to shell hooks,
484 An up to date nodemap should be available to shell hooks,
485
485
486 $ echo dsljfl > a
486 $ echo dsljfl > a
487 $ hg add a
487 $ hg add a
488 $ hg ci -m a
488 $ hg ci -m a
489 $ hg debugnodemap --metadata
489 $ hg debugnodemap --metadata
490 uid: ???????? (glob)
490 uid: ???????? (glob)
491 tip-rev: 5003
491 tip-rev: 5003
492 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
492 tip-node: a52c5079765b5865d97b993b303a18740113bbb2
493 data-length: 121088
493 data-length: 121088
494 data-unused: 0
494 data-unused: 0
495 data-unused: 0.000%
495 data-unused: 0.000%
496 $ echo babar2 > babar
496 $ echo babar2 > babar
497 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
497 $ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
498 uid: ???????? (glob)
498 uid: ???????? (glob)
499 tip-rev: 5004
499 tip-rev: 5004
500 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
500 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
501 data-length: 121280 (pure !)
501 data-length: 121280 (pure !)
502 data-length: 121280 (rust !)
502 data-length: 121280 (rust !)
503 data-length: 121088 (no-pure no-rust !)
503 data-length: 121088 (no-pure no-rust !)
504 data-unused: 192 (pure !)
504 data-unused: 192 (pure !)
505 data-unused: 192 (rust !)
505 data-unused: 192 (rust !)
506 data-unused: 0 (no-pure no-rust !)
506 data-unused: 0 (no-pure no-rust !)
507 data-unused: 0.158% (pure !)
507 data-unused: 0.158% (pure !)
508 data-unused: 0.158% (rust !)
508 data-unused: 0.158% (rust !)
509 data-unused: 0.000% (no-pure no-rust !)
509 data-unused: 0.000% (no-pure no-rust !)
510 $ hg debugnodemap --metadata
510 $ hg debugnodemap --metadata
511 uid: ???????? (glob)
511 uid: ???????? (glob)
512 tip-rev: 5004
512 tip-rev: 5004
513 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
513 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
514 data-length: 121280 (pure !)
514 data-length: 121280 (pure !)
515 data-length: 121280 (rust !)
515 data-length: 121280 (rust !)
516 data-length: 121088 (no-pure no-rust !)
516 data-length: 121088 (no-pure no-rust !)
517 data-unused: 192 (pure !)
517 data-unused: 192 (pure !)
518 data-unused: 192 (rust !)
518 data-unused: 192 (rust !)
519 data-unused: 0 (no-pure no-rust !)
519 data-unused: 0 (no-pure no-rust !)
520 data-unused: 0.158% (pure !)
520 data-unused: 0.158% (pure !)
521 data-unused: 0.158% (rust !)
521 data-unused: 0.158% (rust !)
522 data-unused: 0.000% (no-pure no-rust !)
522 data-unused: 0.000% (no-pure no-rust !)
523
523
524 Another process does not see the pending nodemap content during run.
524 Another process does not see the pending nodemap content during run.
525
525
526 $ echo qpoasp > a
526 $ echo qpoasp > a
527 $ hg ci -m a2 \
527 $ hg ci -m a2 \
528 > --config "hooks.pretxnclose=sh \"$RUNTESTDIR/testlib/wait-on-file\" 20 sync-repo-read sync-txn-pending" \
528 > --config "hooks.pretxnclose=sh \"$RUNTESTDIR/testlib/wait-on-file\" 20 sync-repo-read sync-txn-pending" \
529 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
529 > --config "hooks.txnclose=touch sync-txn-close" > output.txt 2>&1 &
530
530
531 (read the repository while the commit transaction is pending)
531 (read the repository while the commit transaction is pending)
532
532
533 $ sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-pending && \
533 $ sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-pending && \
534 > hg debugnodemap --metadata && \
534 > hg debugnodemap --metadata && \
535 > sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-close sync-repo-read
535 > sh "$RUNTESTDIR/testlib/wait-on-file" 20 sync-txn-close sync-repo-read
536 uid: ???????? (glob)
536 uid: ???????? (glob)
537 tip-rev: 5004
537 tip-rev: 5004
538 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
538 tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
539 data-length: 121280 (pure !)
539 data-length: 121280 (pure !)
540 data-length: 121280 (rust !)
540 data-length: 121280 (rust !)
541 data-length: 121088 (no-pure no-rust !)
541 data-length: 121088 (no-pure no-rust !)
542 data-unused: 192 (pure !)
542 data-unused: 192 (pure !)
543 data-unused: 192 (rust !)
543 data-unused: 192 (rust !)
544 data-unused: 0 (no-pure no-rust !)
544 data-unused: 0 (no-pure no-rust !)
545 data-unused: 0.158% (pure !)
545 data-unused: 0.158% (pure !)
546 data-unused: 0.158% (rust !)
546 data-unused: 0.158% (rust !)
547 data-unused: 0.000% (no-pure no-rust !)
547 data-unused: 0.000% (no-pure no-rust !)
548 $ hg debugnodemap --metadata
548 $ hg debugnodemap --metadata
549 uid: ???????? (glob)
549 uid: ???????? (glob)
550 tip-rev: 5005
550 tip-rev: 5005
551 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
551 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
552 data-length: 121536 (pure !)
552 data-length: 121536 (pure !)
553 data-length: 121536 (rust !)
553 data-length: 121536 (rust !)
554 data-length: 121088 (no-pure no-rust !)
554 data-length: 121088 (no-pure no-rust !)
555 data-unused: 448 (pure !)
555 data-unused: 448 (pure !)
556 data-unused: 448 (rust !)
556 data-unused: 448 (rust !)
557 data-unused: 0 (no-pure no-rust !)
557 data-unused: 0 (no-pure no-rust !)
558 data-unused: 0.369% (pure !)
558 data-unused: 0.369% (pure !)
559 data-unused: 0.369% (rust !)
559 data-unused: 0.369% (rust !)
560 data-unused: 0.000% (no-pure no-rust !)
560 data-unused: 0.000% (no-pure no-rust !)
561
561
562 $ cat output.txt
562 $ cat output.txt
563
563
564 Check that a failing transaction will properly revert the data
564 Check that a failing transaction will properly revert the data
565
565
566 $ echo plakfe > a
566 $ echo plakfe > a
567 $ f --size --sha256 .hg/store/00changelog-*.nd
567 $ f --size --sha256 .hg/store/00changelog-*.nd
568 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
568 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
569 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
569 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
570 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
570 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
571 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
571 $ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
572 transaction abort!
572 transaction abort!
573 rollback completed
573 rollback completed
574 abort: This is a late abort
574 abort: This is a late abort
575 [255]
575 [255]
576 $ hg debugnodemap --metadata
576 $ hg debugnodemap --metadata
577 uid: ???????? (glob)
577 uid: ???????? (glob)
578 tip-rev: 5005
578 tip-rev: 5005
579 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
579 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
580 data-length: 121536 (pure !)
580 data-length: 121536 (pure !)
581 data-length: 121536 (rust !)
581 data-length: 121536 (rust !)
582 data-length: 121088 (no-pure no-rust !)
582 data-length: 121088 (no-pure no-rust !)
583 data-unused: 448 (pure !)
583 data-unused: 448 (pure !)
584 data-unused: 448 (rust !)
584 data-unused: 448 (rust !)
585 data-unused: 0 (no-pure no-rust !)
585 data-unused: 0 (no-pure no-rust !)
586 data-unused: 0.369% (pure !)
586 data-unused: 0.369% (pure !)
587 data-unused: 0.369% (rust !)
587 data-unused: 0.369% (rust !)
588 data-unused: 0.000% (no-pure no-rust !)
588 data-unused: 0.000% (no-pure no-rust !)
589 $ f --size --sha256 .hg/store/00changelog-*.nd
589 $ f --size --sha256 .hg/store/00changelog-*.nd
590 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
590 .hg/store/00changelog-????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
591 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
591 .hg/store/00changelog-????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
592 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
592 .hg/store/00changelog-????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
593
593
594 Check that removing content does not confuse the nodemap
594 Check that removing content does not confuse the nodemap
595 --------------------------------------------------------
595 --------------------------------------------------------
596
596
597 removing data with rollback
597 removing data with rollback
598
598
599 $ echo aso > a
599 $ echo aso > a
600 $ hg ci -m a4
600 $ hg ci -m a4
601 $ hg rollback
601 $ hg rollback
602 repository tip rolled back to revision 5005 (undo commit)
602 repository tip rolled back to revision 5005 (undo commit)
603 working directory now based on revision 5005
603 working directory now based on revision 5005
604 $ hg id -r .
604 $ hg id -r .
605 90d5d3ba2fc4 tip
605 90d5d3ba2fc4 tip
606
606
607 removing data with strip
607 removing data with strip
608
608
609 $ echo aso > a
609 $ echo aso > a
610 $ hg ci -m a4
610 $ hg ci -m a4
611 $ hg --config extensions.strip= strip -r . --no-backup
611 $ hg --config extensions.strip= strip -r . --no-backup
612 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
612 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
613 $ hg id -r . --traceback
613 $ hg id -r . --traceback
614 90d5d3ba2fc4 tip
614 90d5d3ba2fc4 tip
615
615
616 (be a good citizen and regenerate the nodemap)
616 (be a good citizen and regenerate the nodemap)
617 $ hg debugupdatecaches
617 $ hg debugupdatecaches
618 $ hg debugnodemap --metadata
618 $ hg debugnodemap --metadata
619 uid: * (glob)
619 uid: * (glob)
620 tip-rev: 5005
620 tip-rev: 5005
621 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
621 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
622 data-length: 121088
622 data-length: 121088
623 data-unused: 0
623 data-unused: 0
624 data-unused: 0.000%
624 data-unused: 0.000%
625
625
626 Check race condition when multiple process write new data to the repository
626 Check race condition when multiple process write new data to the repository
627 ---------------------------------------------------------------------------
627 ---------------------------------------------------------------------------
628
628
629 In this test, we check that two writers touching the repositories will not
629 In this test, we check that two writers touching the repositories will not
630 overwrite each other data. This test is prompted by the existent of issue6554.
630 overwrite each other data. This test is prompted by the existent of issue6554.
631 Where a writer ended up using and outdated docket to update the repository. See
631 Where a writer ended up using and outdated docket to update the repository. See
632 the dedicated extension for details on the race windows and read/write schedule
632 the dedicated extension for details on the race windows and read/write schedule
633 necessary to end up in this situation: testlib/persistent-nodemap-race-ext.py
633 necessary to end up in this situation: testlib/persistent-nodemap-race-ext.py
634
634
635 The issue was initially observed on a server with a high push trafic, but it
635 The issue was initially observed on a server with a high push trafic, but it
636 can be reproduced using a share and two commiting process which seems simpler.
636 can be reproduced using a share and two commiting process which seems simpler.
637
637
638 The test is Rust only as the other implementation does not use the same
638 The test is Rust only as the other implementation does not use the same
639 read/write patterns.
639 read/write patterns.
640
640
641 $ cd ..
641 $ cd ..
642
642
643 #if rust
643 #if rust
644
644
645 $ cp -R test-repo race-repo
645 $ cp -R test-repo race-repo
646 $ hg share race-repo ./other-wc --config format.use-share-safe=yes
646 $ hg share race-repo ./other-wc --config format.use-share-safe=yes
647 updating working directory
647 updating working directory
648 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
648 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
649 $ hg debugformat -R ./race-repo | egrep 'share-safe|persistent-nodemap'
649 $ hg debugformat -R ./race-repo | egrep 'share-safe|persistent-nodemap'
650 share-safe: yes
650 share-safe: yes
651 persistent-nodemap: yes
651 persistent-nodemap: yes
652 $ hg debugformat -R ./other-wc/ | egrep 'share-safe|persistent-nodemap'
652 $ hg debugformat -R ./other-wc/ | egrep 'share-safe|persistent-nodemap'
653 share-safe: yes
653 share-safe: yes
654 persistent-nodemap: yes
654 persistent-nodemap: yes
655 $ hg -R ./other-wc update 'min(head())'
655 $ hg -R ./other-wc update 'min(head())'
656 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
656 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
657 $ hg -R ./race-repo debugnodemap --metadata
657 $ hg -R ./race-repo debugnodemap --metadata
658 uid: 43c37dde
658 uid: 43c37dde
659 tip-rev: 5005
659 tip-rev: 5005
660 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
660 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
661 data-length: 121088
661 data-length: 121088
662 data-unused: 0
662 data-unused: 0
663 data-unused: 0.000%
663 data-unused: 0.000%
664 $ hg -R ./race-repo log -G -r 'head()'
664 $ hg -R ./race-repo log -G -r 'head()'
665 @ changeset: 5005:90d5d3ba2fc4
665 @ changeset: 5005:90d5d3ba2fc4
666 | tag: tip
666 | tag: tip
667 ~ user: test
667 ~ user: test
668 date: Thu Jan 01 00:00:00 1970 +0000
668 date: Thu Jan 01 00:00:00 1970 +0000
669 summary: a2
669 summary: a2
670
670
671 o changeset: 5001:16395c3cf7e2
671 o changeset: 5001:16395c3cf7e2
672 | user: test
672 | user: test
673 ~ date: Thu Jan 01 00:00:00 1970 +0000
673 ~ date: Thu Jan 01 00:00:00 1970 +0000
674 summary: foo
674 summary: foo
675
675
676 $ hg -R ./other-wc log -G -r 'head()'
676 $ hg -R ./other-wc log -G -r 'head()'
677 o changeset: 5005:90d5d3ba2fc4
677 o changeset: 5005:90d5d3ba2fc4
678 | tag: tip
678 | tag: tip
679 ~ user: test
679 ~ user: test
680 date: Thu Jan 01 00:00:00 1970 +0000
680 date: Thu Jan 01 00:00:00 1970 +0000
681 summary: a2
681 summary: a2
682
682
683 @ changeset: 5001:16395c3cf7e2
683 @ changeset: 5001:16395c3cf7e2
684 | user: test
684 | user: test
685 ~ date: Thu Jan 01 00:00:00 1970 +0000
685 ~ date: Thu Jan 01 00:00:00 1970 +0000
686 summary: foo
686 summary: foo
687
687
688 $ echo left-side-race > race-repo/left-side-race
688 $ echo left-side-race > race-repo/left-side-race
689 $ hg -R ./race-repo/ add race-repo/left-side-race
689 $ hg -R ./race-repo/ add race-repo/left-side-race
690
690
691 $ echo right-side-race > ./other-wc/right-side-race
691 $ echo right-side-race > ./other-wc/right-side-race
692 $ hg -R ./other-wc/ add ./other-wc/right-side-race
692 $ hg -R ./other-wc/ add ./other-wc/right-side-race
693
693
694 $ mkdir sync-files
694 $ mkdir sync-files
695 $ mkdir outputs
695 $ mkdir outputs
696 $ (
696 $ (
697 > hg -R ./race-repo/ commit -m left-side-commit \
697 > hg -R ./race-repo/ commit -m left-side-commit \
698 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
698 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
699 > --config 'devel.nodemap-race.role=left';
699 > --config 'devel.nodemap-race.role=left';
700 > touch sync-files/left-done
700 > touch sync-files/left-done
701 > ) > outputs/left.txt 2>&1 &
701 > ) > outputs/left.txt 2>&1 &
702 $ (
702 $ (
703 > hg -R ./other-wc/ commit -m right-side-commit \
703 > hg -R ./other-wc/ commit -m right-side-commit \
704 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
704 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
705 > --config 'devel.nodemap-race.role=right';
705 > --config 'devel.nodemap-race.role=right';
706 > touch sync-files/right-done
706 > touch sync-files/right-done
707 > ) > outputs/right.txt 2>&1 &
707 > ) > outputs/right.txt 2>&1 &
708 $ (
708 $ (
709 > hg -R ./race-repo/ check-nodemap-race \
709 > hg -R ./race-repo/ check-nodemap-race \
710 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
710 > --config "extensions.race=${RUNTESTDIR}/testlib/persistent-nodemap-race-ext.py" \
711 > --config 'devel.nodemap-race.role=reader';
711 > --config 'devel.nodemap-race.role=reader';
712 > touch sync-files/reader-done
712 > touch sync-files/reader-done
713 > ) > outputs/reader.txt 2>&1 &
713 > ) > outputs/reader.txt 2>&1 &
714 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/left-done
714 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/left-done
715 $ cat outputs/left.txt
715 $ cat outputs/left.txt
716 docket-details:
716 docket-details:
717 uid: 43c37dde
717 uid: 43c37dde
718 actual-tip: 5005
718 actual-tip: 5005
719 tip-rev: 5005
719 tip-rev: 5005
720 data-length: 121088
720 data-length: 121088
721 nodemap-race: left side locked and ready to commit
721 nodemap-race: left side locked and ready to commit
722 docket-details:
722 docket-details:
723 uid: 43c37dde
723 uid: 43c37dde
724 actual-tip: 5005
724 actual-tip: 5005
725 tip-rev: 5005
725 tip-rev: 5005
726 data-length: 121088
726 data-length: 121088
727 finalized changelog write
727 finalized changelog write
728 persisting changelog nodemap
728 persisting changelog nodemap
729 new data start at 121088
729 new data start at 121088
730 persisted changelog nodemap
730 persisted changelog nodemap
731 docket-details:
731 docket-details:
732 uid: 43c37dde
732 uid: 43c37dde
733 actual-tip: 5006
733 actual-tip: 5006
734 tip-rev: 5006
734 tip-rev: 5006
735 data-length: 121280
735 data-length: 121280
736 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/right-done
736 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/right-done
737 $ cat outputs/right.txt
737 $ cat outputs/right.txt
738 nodemap-race: right side start of the locking sequence
738 nodemap-race: right side start of the locking sequence
739 nodemap-race: right side reading changelog
739 nodemap-race: right side reading changelog
740 nodemap-race: right side reading of changelog is done
740 nodemap-race: right side reading of changelog is done
741 docket-details:
741 docket-details:
742 uid: 43c37dde
742 uid: 43c37dde
743 actual-tip: 5006
743 actual-tip: 5006
744 tip-rev: 5005
744 tip-rev: 5005
745 data-length: 121088
745 data-length: 121088
746 nodemap-race: right side ready to wait for the lock
746 nodemap-race: right side ready to wait for the lock
747 nodemap-race: right side locked and ready to commit
747 nodemap-race: right side locked and ready to commit
748 docket-details:
748 docket-details:
749 uid: 43c37dde
749 uid: 43c37dde
750 actual-tip: 5006
750 actual-tip: 5006
751 tip-rev: 5005
751 tip-rev: 5006
752 data-length: 121088
752 data-length: 121280
753 right ready to write, waiting for reader
753 right ready to write, waiting for reader
754 right proceeding with writing its changelog index and nodemap
754 right proceeding with writing its changelog index and nodemap
755 finalized changelog write
755 finalized changelog write
756 persisting changelog nodemap
756 persisting changelog nodemap
757 new data start at 121088
757 new data start at 121280
758 persisted changelog nodemap
758 persisted changelog nodemap
759 docket-details:
759 docket-details:
760 uid: 43c37dde
760 uid: 43c37dde
761 actual-tip: 5007
761 actual-tip: 5007
762 tip-rev: 5007
762 tip-rev: 5007
763 data-length: 121472
763 data-length: 121536
764 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/reader-done
764 $ sh "$RUNTESTDIR"/testlib/wait-on-file 10 sync-files/reader-done
765 $ cat outputs/reader.txt
765 $ cat outputs/reader.txt
766 reader: reading changelog
766 reader: reading changelog
767 reader ready to read the changelog, waiting for right
767 reader ready to read the changelog, waiting for right
768 reader: nodemap docket read
768 reader: nodemap docket read
769 record-data-length: 121280
769 record-data-length: 121280
770 actual-data-length: 121280
770 actual-data-length: 121280
771 file-actual-length: 121472
771 file-actual-length: 121536
772 reader: changelog read
772 reader: changelog read
773 docket-details:
773 docket-details:
774 uid: 43c37dde
774 uid: 43c37dde
775 actual-tip: 5006
775 actual-tip: 5006
776 tip-rev: 5006
776 tip-rev: 5006
777 data-length: 121280
777 data-length: 121280
778 tip-rev: 5006
778 tip-rev: 5006
779 tip-node: 492901161367
779 tip-node: 492901161367
780 node-rev: 5006
780 node-rev: 5006
781 error while checking revision: 18 (known-bad-output !)
782 Inconsistency: Revision 5007 found in nodemap is not in revlog indexi (known-bad-output !)
783
781
784 $ hg -R ./race-repo log -G -r 'head()'
782 $ hg -R ./race-repo log -G -r 'head()'
785 o changeset: 5007:ac4a2abde241
783 o changeset: 5007:ac4a2abde241
786 | tag: tip
784 | tag: tip
787 ~ parent: 5001:16395c3cf7e2
785 ~ parent: 5001:16395c3cf7e2
788 user: test
786 user: test
789 date: Thu Jan 01 00:00:00 1970 +0000
787 date: Thu Jan 01 00:00:00 1970 +0000
790 summary: right-side-commit
788 summary: right-side-commit
791
789
792 @ changeset: 5006:492901161367
790 @ changeset: 5006:492901161367
793 | user: test
791 | user: test
794 ~ date: Thu Jan 01 00:00:00 1970 +0000
792 ~ date: Thu Jan 01 00:00:00 1970 +0000
795 summary: left-side-commit
793 summary: left-side-commit
796
794
797 $ hg -R ./other-wc log -G -r 'head()'
795 $ hg -R ./other-wc log -G -r 'head()'
798 @ changeset: 5007:ac4a2abde241
796 @ changeset: 5007:ac4a2abde241
799 | tag: tip
797 | tag: tip
800 ~ parent: 5001:16395c3cf7e2
798 ~ parent: 5001:16395c3cf7e2
801 user: test
799 user: test
802 date: Thu Jan 01 00:00:00 1970 +0000
800 date: Thu Jan 01 00:00:00 1970 +0000
803 summary: right-side-commit
801 summary: right-side-commit
804
802
805 o changeset: 5006:492901161367
803 o changeset: 5006:492901161367
806 | user: test
804 | user: test
807 ~ date: Thu Jan 01 00:00:00 1970 +0000
805 ~ date: Thu Jan 01 00:00:00 1970 +0000
808 summary: left-side-commit
806 summary: left-side-commit
809
807
810 #endif
808 #endif
811
809
812 Test upgrade / downgrade
810 Test upgrade / downgrade
813 ========================
811 ========================
814
812
815 $ cd ./test-repo/
813 $ cd ./test-repo/
816
814
817 downgrading
815 downgrading
818
816
819 $ cat << EOF >> .hg/hgrc
817 $ cat << EOF >> .hg/hgrc
820 > [format]
818 > [format]
821 > use-persistent-nodemap=no
819 > use-persistent-nodemap=no
822 > EOF
820 > EOF
823 $ hg debugformat -v
821 $ hg debugformat -v
824 format-variant repo config default
822 format-variant repo config default
825 fncache: yes yes yes
823 fncache: yes yes yes
826 dirstate-v2: no no no
824 dirstate-v2: no no no
827 dotencode: yes yes yes
825 dotencode: yes yes yes
828 generaldelta: yes yes yes
826 generaldelta: yes yes yes
829 share-safe: yes yes no
827 share-safe: yes yes no
830 sparserevlog: yes yes yes
828 sparserevlog: yes yes yes
831 persistent-nodemap: yes no no
829 persistent-nodemap: yes no no
832 copies-sdc: no no no
830 copies-sdc: no no no
833 revlog-v2: no no no
831 revlog-v2: no no no
834 changelog-v2: no no no
832 changelog-v2: no no no
835 plain-cl-delta: yes yes yes
833 plain-cl-delta: yes yes yes
836 compression: zlib zlib zlib (no-zstd !)
834 compression: zlib zlib zlib (no-zstd !)
837 compression: zstd zstd zstd (zstd !)
835 compression: zstd zstd zstd (zstd !)
838 compression-level: default default default
836 compression-level: default default default
839 $ hg debugupgraderepo --run --no-backup --quiet
837 $ hg debugupgraderepo --run --no-backup --quiet
840 upgrade will perform the following actions:
838 upgrade will perform the following actions:
841
839
842 requirements
840 requirements
843 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
841 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
844 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
842 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
845 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
843 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
846 removed: persistent-nodemap
844 removed: persistent-nodemap
847
845
848 processed revlogs:
846 processed revlogs:
849 - all-filelogs
847 - all-filelogs
850 - changelog
848 - changelog
851 - manifest
849 - manifest
852
850
853 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
851 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
854 00changelog-*.nd (glob)
852 00changelog-*.nd (glob)
855 00manifest-*.nd (glob)
853 00manifest-*.nd (glob)
856 undo.backup.00changelog.n
854 undo.backup.00changelog.n
857 undo.backup.00manifest.n
855 undo.backup.00manifest.n
858 $ hg debugnodemap --metadata
856 $ hg debugnodemap --metadata
859
857
860
858
861 upgrading
859 upgrading
862
860
863 $ cat << EOF >> .hg/hgrc
861 $ cat << EOF >> .hg/hgrc
864 > [format]
862 > [format]
865 > use-persistent-nodemap=yes
863 > use-persistent-nodemap=yes
866 > EOF
864 > EOF
867 $ hg debugformat -v
865 $ hg debugformat -v
868 format-variant repo config default
866 format-variant repo config default
869 fncache: yes yes yes
867 fncache: yes yes yes
870 dirstate-v2: no no no
868 dirstate-v2: no no no
871 dotencode: yes yes yes
869 dotencode: yes yes yes
872 generaldelta: yes yes yes
870 generaldelta: yes yes yes
873 share-safe: yes yes no
871 share-safe: yes yes no
874 sparserevlog: yes yes yes
872 sparserevlog: yes yes yes
875 persistent-nodemap: no yes no
873 persistent-nodemap: no yes no
876 copies-sdc: no no no
874 copies-sdc: no no no
877 revlog-v2: no no no
875 revlog-v2: no no no
878 changelog-v2: no no no
876 changelog-v2: no no no
879 plain-cl-delta: yes yes yes
877 plain-cl-delta: yes yes yes
880 compression: zlib zlib zlib (no-zstd !)
878 compression: zlib zlib zlib (no-zstd !)
881 compression: zstd zstd zstd (zstd !)
879 compression: zstd zstd zstd (zstd !)
882 compression-level: default default default
880 compression-level: default default default
883 $ hg debugupgraderepo --run --no-backup --quiet
881 $ hg debugupgraderepo --run --no-backup --quiet
884 upgrade will perform the following actions:
882 upgrade will perform the following actions:
885
883
886 requirements
884 requirements
887 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
885 preserved: dotencode, fncache, generaldelta, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
888 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
886 preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
889 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
887 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
890 added: persistent-nodemap
888 added: persistent-nodemap
891
889
892 processed revlogs:
890 processed revlogs:
893 - all-filelogs
891 - all-filelogs
894 - changelog
892 - changelog
895 - manifest
893 - manifest
896
894
897 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
895 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
898 00changelog-*.nd (glob)
896 00changelog-*.nd (glob)
899 00changelog.n
897 00changelog.n
900 00manifest-*.nd (glob)
898 00manifest-*.nd (glob)
901 00manifest.n
899 00manifest.n
902 undo.backup.00changelog.n
900 undo.backup.00changelog.n
903 undo.backup.00manifest.n
901 undo.backup.00manifest.n
904
902
905 $ hg debugnodemap --metadata
903 $ hg debugnodemap --metadata
906 uid: * (glob)
904 uid: * (glob)
907 tip-rev: 5005
905 tip-rev: 5005
908 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
906 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
909 data-length: 121088
907 data-length: 121088
910 data-unused: 0
908 data-unused: 0
911 data-unused: 0.000%
909 data-unused: 0.000%
912
910
913 Running unrelated upgrade
911 Running unrelated upgrade
914
912
915 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
913 $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
916 upgrade will perform the following actions:
914 upgrade will perform the following actions:
917
915
918 requirements
916 requirements
919 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
917 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, share-safe, sparserevlog, store (no-zstd no-dirstate-v2 !)
920 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
918 preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd no-dirstate-v2 !)
921 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
919 preserved: dotencode, exp-dirstate-v2, fncache, generaldelta, persistent-nodemap, revlog-compression-zstd, revlogv1, share-safe, sparserevlog, store (zstd dirstate-v2 !)
922
920
923 optimisations: re-delta-all
921 optimisations: re-delta-all
924
922
925 processed revlogs:
923 processed revlogs:
926 - all-filelogs
924 - all-filelogs
927 - changelog
925 - changelog
928 - manifest
926 - manifest
929
927
930 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
928 $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
931 00changelog-*.nd (glob)
929 00changelog-*.nd (glob)
932 00changelog.n
930 00changelog.n
933 00manifest-*.nd (glob)
931 00manifest-*.nd (glob)
934 00manifest.n
932 00manifest.n
935
933
936 $ hg debugnodemap --metadata
934 $ hg debugnodemap --metadata
937 uid: * (glob)
935 uid: * (glob)
938 tip-rev: 5005
936 tip-rev: 5005
939 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
937 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
940 data-length: 121088
938 data-length: 121088
941 data-unused: 0
939 data-unused: 0
942 data-unused: 0.000%
940 data-unused: 0.000%
943
941
944 Persistent nodemap and local/streaming clone
942 Persistent nodemap and local/streaming clone
945 ============================================
943 ============================================
946
944
947 $ cd ..
945 $ cd ..
948
946
949 standard clone
947 standard clone
950 --------------
948 --------------
951
949
952 The persistent nodemap should exist after a streaming clone
950 The persistent nodemap should exist after a streaming clone
953
951
954 $ hg clone --pull --quiet -U test-repo standard-clone
952 $ hg clone --pull --quiet -U test-repo standard-clone
955 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
953 $ ls -1 standard-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
956 00changelog-*.nd (glob)
954 00changelog-*.nd (glob)
957 00changelog.n
955 00changelog.n
958 00manifest-*.nd (glob)
956 00manifest-*.nd (glob)
959 00manifest.n
957 00manifest.n
960 $ hg -R standard-clone debugnodemap --metadata
958 $ hg -R standard-clone debugnodemap --metadata
961 uid: * (glob)
959 uid: * (glob)
962 tip-rev: 5005
960 tip-rev: 5005
963 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
961 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
964 data-length: 121088
962 data-length: 121088
965 data-unused: 0
963 data-unused: 0
966 data-unused: 0.000%
964 data-unused: 0.000%
967
965
968
966
969 local clone
967 local clone
970 ------------
968 ------------
971
969
972 The persistent nodemap should exist after a streaming clone
970 The persistent nodemap should exist after a streaming clone
973
971
974 $ hg clone -U test-repo local-clone
972 $ hg clone -U test-repo local-clone
975 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
973 $ ls -1 local-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
976 00changelog-*.nd (glob)
974 00changelog-*.nd (glob)
977 00changelog.n
975 00changelog.n
978 00manifest-*.nd (glob)
976 00manifest-*.nd (glob)
979 00manifest.n
977 00manifest.n
980 $ hg -R local-clone debugnodemap --metadata
978 $ hg -R local-clone debugnodemap --metadata
981 uid: * (glob)
979 uid: * (glob)
982 tip-rev: 5005
980 tip-rev: 5005
983 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
981 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
984 data-length: 121088
982 data-length: 121088
985 data-unused: 0
983 data-unused: 0
986 data-unused: 0.000%
984 data-unused: 0.000%
987
985
988 Test various corruption case
986 Test various corruption case
989 ============================
987 ============================
990
988
991 Missing datafile
989 Missing datafile
992 ----------------
990 ----------------
993
991
994 Test behavior with a missing datafile
992 Test behavior with a missing datafile
995
993
996 $ hg clone --quiet --pull test-repo corruption-test-repo
994 $ hg clone --quiet --pull test-repo corruption-test-repo
997 $ ls -1 corruption-test-repo/.hg/store/00changelog*
995 $ ls -1 corruption-test-repo/.hg/store/00changelog*
998 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
996 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
999 corruption-test-repo/.hg/store/00changelog.d
997 corruption-test-repo/.hg/store/00changelog.d
1000 corruption-test-repo/.hg/store/00changelog.i
998 corruption-test-repo/.hg/store/00changelog.i
1001 corruption-test-repo/.hg/store/00changelog.n
999 corruption-test-repo/.hg/store/00changelog.n
1002 $ rm corruption-test-repo/.hg/store/00changelog*.nd
1000 $ rm corruption-test-repo/.hg/store/00changelog*.nd
1003 $ hg log -R corruption-test-repo -r .
1001 $ hg log -R corruption-test-repo -r .
1004 changeset: 5005:90d5d3ba2fc4
1002 changeset: 5005:90d5d3ba2fc4
1005 tag: tip
1003 tag: tip
1006 user: test
1004 user: test
1007 date: Thu Jan 01 00:00:00 1970 +0000
1005 date: Thu Jan 01 00:00:00 1970 +0000
1008 summary: a2
1006 summary: a2
1009
1007
1010 $ ls -1 corruption-test-repo/.hg/store/00changelog*
1008 $ ls -1 corruption-test-repo/.hg/store/00changelog*
1011 corruption-test-repo/.hg/store/00changelog.d
1009 corruption-test-repo/.hg/store/00changelog.d
1012 corruption-test-repo/.hg/store/00changelog.i
1010 corruption-test-repo/.hg/store/00changelog.i
1013 corruption-test-repo/.hg/store/00changelog.n
1011 corruption-test-repo/.hg/store/00changelog.n
1014
1012
1015 Truncated data file
1013 Truncated data file
1016 -------------------
1014 -------------------
1017
1015
1018 Test behavior with a too short datafile
1016 Test behavior with a too short datafile
1019
1017
1020 rebuild the missing data
1018 rebuild the missing data
1021 $ hg -R corruption-test-repo debugupdatecache
1019 $ hg -R corruption-test-repo debugupdatecache
1022 $ ls -1 corruption-test-repo/.hg/store/00changelog*
1020 $ ls -1 corruption-test-repo/.hg/store/00changelog*
1023 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
1021 corruption-test-repo/.hg/store/00changelog-*.nd (glob)
1024 corruption-test-repo/.hg/store/00changelog.d
1022 corruption-test-repo/.hg/store/00changelog.d
1025 corruption-test-repo/.hg/store/00changelog.i
1023 corruption-test-repo/.hg/store/00changelog.i
1026 corruption-test-repo/.hg/store/00changelog.n
1024 corruption-test-repo/.hg/store/00changelog.n
1027
1025
1028 truncate the file
1026 truncate the file
1029
1027
1030 $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
1028 $ datafilepath=`ls corruption-test-repo/.hg/store/00changelog*.nd`
1031 $ f -s $datafilepath
1029 $ f -s $datafilepath
1032 corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
1030 corruption-test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
1033 $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=noxfer
1031 $ dd if=$datafilepath bs=1000 count=10 of=$datafilepath-tmp status=noxfer
1034 10+0 records in
1032 10+0 records in
1035 10+0 records out
1033 10+0 records out
1036 $ mv $datafilepath-tmp $datafilepath
1034 $ mv $datafilepath-tmp $datafilepath
1037 $ f -s $datafilepath
1035 $ f -s $datafilepath
1038 corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
1036 corruption-test-repo/.hg/store/00changelog-*.nd: size=10000 (glob)
1039
1037
1040 Check that Mercurial reaction to this event
1038 Check that Mercurial reaction to this event
1041
1039
1042 $ hg -R corruption-test-repo log -r . --traceback
1040 $ hg -R corruption-test-repo log -r . --traceback
1043 changeset: 5005:90d5d3ba2fc4
1041 changeset: 5005:90d5d3ba2fc4
1044 tag: tip
1042 tag: tip
1045 user: test
1043 user: test
1046 date: Thu Jan 01 00:00:00 1970 +0000
1044 date: Thu Jan 01 00:00:00 1970 +0000
1047 summary: a2
1045 summary: a2
1048
1046
1049
1047
1050
1048
1051 stream clone
1049 stream clone
1052 ============
1050 ============
1053
1051
1054 The persistent nodemap should exist after a streaming clone
1052 The persistent nodemap should exist after a streaming clone
1055
1053
1056 Simple case
1054 Simple case
1057 -----------
1055 -----------
1058
1056
1059 No race condition
1057 No race condition
1060
1058
1061 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1059 $ hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone --debug | egrep '00(changelog|manifest)'
1062 adding [s] 00manifest.n (62 bytes)
1060 adding [s] 00manifest.n (62 bytes)
1063 adding [s] 00manifest-*.nd (118 KB) (glob)
1061 adding [s] 00manifest-*.nd (118 KB) (glob)
1064 adding [s] 00changelog.n (62 bytes)
1062 adding [s] 00changelog.n (62 bytes)
1065 adding [s] 00changelog-*.nd (118 KB) (glob)
1063 adding [s] 00changelog-*.nd (118 KB) (glob)
1066 adding [s] 00manifest.d (452 KB) (no-zstd !)
1064 adding [s] 00manifest.d (452 KB) (no-zstd !)
1067 adding [s] 00manifest.d (491 KB) (zstd !)
1065 adding [s] 00manifest.d (491 KB) (zstd !)
1068 adding [s] 00changelog.d (360 KB) (no-zstd !)
1066 adding [s] 00changelog.d (360 KB) (no-zstd !)
1069 adding [s] 00changelog.d (368 KB) (zstd !)
1067 adding [s] 00changelog.d (368 KB) (zstd !)
1070 adding [s] 00manifest.i (313 KB)
1068 adding [s] 00manifest.i (313 KB)
1071 adding [s] 00changelog.i (313 KB)
1069 adding [s] 00changelog.i (313 KB)
1072 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
1070 $ ls -1 stream-clone/.hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
1073 00changelog-*.nd (glob)
1071 00changelog-*.nd (glob)
1074 00changelog.n
1072 00changelog.n
1075 00manifest-*.nd (glob)
1073 00manifest-*.nd (glob)
1076 00manifest.n
1074 00manifest.n
1077 $ hg -R stream-clone debugnodemap --metadata
1075 $ hg -R stream-clone debugnodemap --metadata
1078 uid: * (glob)
1076 uid: * (glob)
1079 tip-rev: 5005
1077 tip-rev: 5005
1080 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1078 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1081 data-length: 121088
1079 data-length: 121088
1082 data-unused: 0
1080 data-unused: 0
1083 data-unused: 0.000%
1081 data-unused: 0.000%
1084
1082
1085 new data appened
1083 new data appened
1086 -----------------
1084 -----------------
1087
1085
1088 Other commit happening on the server during the stream clone
1086 Other commit happening on the server during the stream clone
1089
1087
1090 setup the step-by-step stream cloning
1088 setup the step-by-step stream cloning
1091
1089
1092 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
1090 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
1093 $ export HG_TEST_STREAM_WALKED_FILE_1
1091 $ export HG_TEST_STREAM_WALKED_FILE_1
1094 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
1092 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
1095 $ export HG_TEST_STREAM_WALKED_FILE_2
1093 $ export HG_TEST_STREAM_WALKED_FILE_2
1096 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
1094 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
1097 $ export HG_TEST_STREAM_WALKED_FILE_3
1095 $ export HG_TEST_STREAM_WALKED_FILE_3
1098 $ cat << EOF >> test-repo/.hg/hgrc
1096 $ cat << EOF >> test-repo/.hg/hgrc
1099 > [extensions]
1097 > [extensions]
1100 > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
1098 > steps=$RUNTESTDIR/testlib/ext-stream-clone-steps.py
1101 > EOF
1099 > EOF
1102
1100
1103 Check and record file state beforehand
1101 Check and record file state beforehand
1104
1102
1105 $ f --size test-repo/.hg/store/00changelog*
1103 $ f --size test-repo/.hg/store/00changelog*
1106 test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
1104 test-repo/.hg/store/00changelog-*.nd: size=121088 (glob)
1107 test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
1105 test-repo/.hg/store/00changelog.d: size=376891 (zstd !)
1108 test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
1106 test-repo/.hg/store/00changelog.d: size=368890 (no-zstd !)
1109 test-repo/.hg/store/00changelog.i: size=320384
1107 test-repo/.hg/store/00changelog.i: size=320384
1110 test-repo/.hg/store/00changelog.n: size=62
1108 test-repo/.hg/store/00changelog.n: size=62
1111 $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
1109 $ hg -R test-repo debugnodemap --metadata | tee server-metadata.txt
1112 uid: * (glob)
1110 uid: * (glob)
1113 tip-rev: 5005
1111 tip-rev: 5005
1114 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1112 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1115 data-length: 121088
1113 data-length: 121088
1116 data-unused: 0
1114 data-unused: 0
1117 data-unused: 0.000%
1115 data-unused: 0.000%
1118
1116
1119 Prepare a commit
1117 Prepare a commit
1120
1118
1121 $ echo foo >> test-repo/foo
1119 $ echo foo >> test-repo/foo
1122 $ hg -R test-repo/ add test-repo/foo
1120 $ hg -R test-repo/ add test-repo/foo
1123
1121
1124 Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time.
1122 Do a mix of clone and commit at the same time so that the file listed on disk differ at actual transfer time.
1125
1123
1126 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1124 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-1 --debug 2>> clone-output | egrep '00(changelog|manifest)' >> clone-output; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1127 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1125 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1128 $ hg -R test-repo/ commit -m foo
1126 $ hg -R test-repo/ commit -m foo
1129 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1127 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1130 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1128 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1131 $ cat clone-output
1129 $ cat clone-output
1132 adding [s] 00manifest.n (62 bytes)
1130 adding [s] 00manifest.n (62 bytes)
1133 adding [s] 00manifest-*.nd (118 KB) (glob)
1131 adding [s] 00manifest-*.nd (118 KB) (glob)
1134 adding [s] 00changelog.n (62 bytes)
1132 adding [s] 00changelog.n (62 bytes)
1135 adding [s] 00changelog-*.nd (118 KB) (glob)
1133 adding [s] 00changelog-*.nd (118 KB) (glob)
1136 adding [s] 00manifest.d (452 KB) (no-zstd !)
1134 adding [s] 00manifest.d (452 KB) (no-zstd !)
1137 adding [s] 00manifest.d (491 KB) (zstd !)
1135 adding [s] 00manifest.d (491 KB) (zstd !)
1138 adding [s] 00changelog.d (360 KB) (no-zstd !)
1136 adding [s] 00changelog.d (360 KB) (no-zstd !)
1139 adding [s] 00changelog.d (368 KB) (zstd !)
1137 adding [s] 00changelog.d (368 KB) (zstd !)
1140 adding [s] 00manifest.i (313 KB)
1138 adding [s] 00manifest.i (313 KB)
1141 adding [s] 00changelog.i (313 KB)
1139 adding [s] 00changelog.i (313 KB)
1142
1140
1143 Check the result state
1141 Check the result state
1144
1142
1145 $ f --size stream-clone-race-1/.hg/store/00changelog*
1143 $ f --size stream-clone-race-1/.hg/store/00changelog*
1146 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob)
1144 stream-clone-race-1/.hg/store/00changelog-*.nd: size=121088 (glob)
1147 stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
1145 stream-clone-race-1/.hg/store/00changelog.d: size=368890 (no-zstd !)
1148 stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
1146 stream-clone-race-1/.hg/store/00changelog.d: size=376891 (zstd !)
1149 stream-clone-race-1/.hg/store/00changelog.i: size=320384
1147 stream-clone-race-1/.hg/store/00changelog.i: size=320384
1150 stream-clone-race-1/.hg/store/00changelog.n: size=62
1148 stream-clone-race-1/.hg/store/00changelog.n: size=62
1151
1149
1152 $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
1150 $ hg -R stream-clone-race-1 debugnodemap --metadata | tee client-metadata.txt
1153 uid: * (glob)
1151 uid: * (glob)
1154 tip-rev: 5005
1152 tip-rev: 5005
1155 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1153 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1156 data-length: 121088
1154 data-length: 121088
1157 data-unused: 0
1155 data-unused: 0
1158 data-unused: 0.000%
1156 data-unused: 0.000%
1159
1157
1160 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1158 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1161 (ie: the following diff should be empty)
1159 (ie: the following diff should be empty)
1162
1160
1163 This isn't the case for the `no-rust` `no-pure` implementation as it use a very minimal nodemap implementation that unconditionnaly rewrite the nodemap "all the time".
1161 This isn't the case for the `no-rust` `no-pure` implementation as it use a very minimal nodemap implementation that unconditionnaly rewrite the nodemap "all the time".
1164
1162
1165 #if no-rust no-pure
1163 #if no-rust no-pure
1166 $ diff -u server-metadata.txt client-metadata.txt
1164 $ diff -u server-metadata.txt client-metadata.txt
1167 --- server-metadata.txt * (glob)
1165 --- server-metadata.txt * (glob)
1168 +++ client-metadata.txt * (glob)
1166 +++ client-metadata.txt * (glob)
1169 @@ -1,4 +1,4 @@
1167 @@ -1,4 +1,4 @@
1170 -uid: * (glob)
1168 -uid: * (glob)
1171 +uid: * (glob)
1169 +uid: * (glob)
1172 tip-rev: 5005
1170 tip-rev: 5005
1173 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1171 tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
1174 data-length: 121088
1172 data-length: 121088
1175 [1]
1173 [1]
1176 #else
1174 #else
1177 $ diff -u server-metadata.txt client-metadata.txt
1175 $ diff -u server-metadata.txt client-metadata.txt
1178 #endif
1176 #endif
1179
1177
1180
1178
1181 Clean up after the test.
1179 Clean up after the test.
1182
1180
1183 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
1181 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_1"
1184 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
1182 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_2"
1185 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
1183 $ rm -f "$HG_TEST_STREAM_WALKED_FILE_3"
1186
1184
1187 full regeneration
1185 full regeneration
1188 -----------------
1186 -----------------
1189
1187
1190 A full nodemap is generated
1188 A full nodemap is generated
1191
1189
1192 (ideally this test would append enough data to make sure the nodemap data file
1190 (ideally this test would append enough data to make sure the nodemap data file
1193 get changed, however to make thing simpler we will force the regeneration for
1191 get changed, however to make thing simpler we will force the regeneration for
1194 this test.
1192 this test.
1195
1193
1196 Check the initial state
1194 Check the initial state
1197
1195
1198 $ f --size test-repo/.hg/store/00changelog*
1196 $ f --size test-repo/.hg/store/00changelog*
1199 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1197 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1200 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1198 test-repo/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1201 test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1199 test-repo/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1202 test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
1200 test-repo/.hg/store/00changelog.d: size=376950 (zstd !)
1203 test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
1201 test-repo/.hg/store/00changelog.d: size=368949 (no-zstd !)
1204 test-repo/.hg/store/00changelog.i: size=320448
1202 test-repo/.hg/store/00changelog.i: size=320448
1205 test-repo/.hg/store/00changelog.n: size=62
1203 test-repo/.hg/store/00changelog.n: size=62
1206 $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
1204 $ hg -R test-repo debugnodemap --metadata | tee server-metadata-2.txt
1207 uid: * (glob)
1205 uid: * (glob)
1208 tip-rev: 5006
1206 tip-rev: 5006
1209 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1207 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1210 data-length: 121344 (rust !)
1208 data-length: 121344 (rust !)
1211 data-length: 121344 (pure !)
1209 data-length: 121344 (pure !)
1212 data-length: 121152 (no-rust no-pure !)
1210 data-length: 121152 (no-rust no-pure !)
1213 data-unused: 192 (rust !)
1211 data-unused: 192 (rust !)
1214 data-unused: 192 (pure !)
1212 data-unused: 192 (pure !)
1215 data-unused: 0 (no-rust no-pure !)
1213 data-unused: 0 (no-rust no-pure !)
1216 data-unused: 0.158% (rust !)
1214 data-unused: 0.158% (rust !)
1217 data-unused: 0.158% (pure !)
1215 data-unused: 0.158% (pure !)
1218 data-unused: 0.000% (no-rust no-pure !)
1216 data-unused: 0.000% (no-rust no-pure !)
1219
1217
1220 Performe the mix of clone and full refresh of the nodemap, so that the files
1218 Performe the mix of clone and full refresh of the nodemap, so that the files
1221 (and filenames) are different between listing time and actual transfer time.
1219 (and filenames) are different between listing time and actual transfer time.
1222
1220
1223 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1221 $ (hg clone -U --stream --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/test-repo stream-clone-race-2 --debug 2>> clone-output-2 | egrep '00(changelog|manifest)' >> clone-output-2; touch $HG_TEST_STREAM_WALKED_FILE_3) &
1224 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1222 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
1225 $ rm test-repo/.hg/store/00changelog.n
1223 $ rm test-repo/.hg/store/00changelog.n
1226 $ rm test-repo/.hg/store/00changelog-*.nd
1224 $ rm test-repo/.hg/store/00changelog-*.nd
1227 $ hg -R test-repo/ debugupdatecache
1225 $ hg -R test-repo/ debugupdatecache
1228 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1226 $ touch $HG_TEST_STREAM_WALKED_FILE_2
1229 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1227 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
1230
1228
1231 (note: the stream clone code wronly pick the `undo.` files)
1229 (note: the stream clone code wronly pick the `undo.` files)
1232
1230
1233 $ cat clone-output-2
1231 $ cat clone-output-2
1234 adding [s] undo.backup.00manifest.n (62 bytes) (known-bad-output !)
1232 adding [s] undo.backup.00manifest.n (62 bytes) (known-bad-output !)
1235 adding [s] undo.backup.00changelog.n (62 bytes) (known-bad-output !)
1233 adding [s] undo.backup.00changelog.n (62 bytes) (known-bad-output !)
1236 adding [s] 00manifest.n (62 bytes)
1234 adding [s] 00manifest.n (62 bytes)
1237 adding [s] 00manifest-*.nd (118 KB) (glob)
1235 adding [s] 00manifest-*.nd (118 KB) (glob)
1238 adding [s] 00changelog.n (62 bytes)
1236 adding [s] 00changelog.n (62 bytes)
1239 adding [s] 00changelog-*.nd (118 KB) (glob)
1237 adding [s] 00changelog-*.nd (118 KB) (glob)
1240 adding [s] 00manifest.d (492 KB) (zstd !)
1238 adding [s] 00manifest.d (492 KB) (zstd !)
1241 adding [s] 00manifest.d (452 KB) (no-zstd !)
1239 adding [s] 00manifest.d (452 KB) (no-zstd !)
1242 adding [s] 00changelog.d (360 KB) (no-zstd !)
1240 adding [s] 00changelog.d (360 KB) (no-zstd !)
1243 adding [s] 00changelog.d (368 KB) (zstd !)
1241 adding [s] 00changelog.d (368 KB) (zstd !)
1244 adding [s] 00manifest.i (313 KB)
1242 adding [s] 00manifest.i (313 KB)
1245 adding [s] 00changelog.i (313 KB)
1243 adding [s] 00changelog.i (313 KB)
1246
1244
1247 Check the result.
1245 Check the result.
1248
1246
1249 $ f --size stream-clone-race-2/.hg/store/00changelog*
1247 $ f --size stream-clone-race-2/.hg/store/00changelog*
1250 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1248 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (rust !)
1251 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1249 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121344 (glob) (pure !)
1252 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1250 stream-clone-race-2/.hg/store/00changelog-*.nd: size=121152 (glob) (no-rust no-pure !)
1253 stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
1251 stream-clone-race-2/.hg/store/00changelog.d: size=376950 (zstd !)
1254 stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
1252 stream-clone-race-2/.hg/store/00changelog.d: size=368949 (no-zstd !)
1255 stream-clone-race-2/.hg/store/00changelog.i: size=320448
1253 stream-clone-race-2/.hg/store/00changelog.i: size=320448
1256 stream-clone-race-2/.hg/store/00changelog.n: size=62
1254 stream-clone-race-2/.hg/store/00changelog.n: size=62
1257
1255
1258 $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
1256 $ hg -R stream-clone-race-2 debugnodemap --metadata | tee client-metadata-2.txt
1259 uid: * (glob)
1257 uid: * (glob)
1260 tip-rev: 5006
1258 tip-rev: 5006
1261 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1259 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1262 data-length: 121344 (rust !)
1260 data-length: 121344 (rust !)
1263 data-unused: 192 (rust !)
1261 data-unused: 192 (rust !)
1264 data-unused: 0.158% (rust !)
1262 data-unused: 0.158% (rust !)
1265 data-length: 121152 (no-rust no-pure !)
1263 data-length: 121152 (no-rust no-pure !)
1266 data-unused: 0 (no-rust no-pure !)
1264 data-unused: 0 (no-rust no-pure !)
1267 data-unused: 0.000% (no-rust no-pure !)
1265 data-unused: 0.000% (no-rust no-pure !)
1268 data-length: 121344 (pure !)
1266 data-length: 121344 (pure !)
1269 data-unused: 192 (pure !)
1267 data-unused: 192 (pure !)
1270 data-unused: 0.158% (pure !)
1268 data-unused: 0.158% (pure !)
1271
1269
1272 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1270 We get a usable nodemap, so no rewrite would be needed and the metadata should be identical
1273 (ie: the following diff should be empty)
1271 (ie: the following diff should be empty)
1274
1272
1275 This isn't the case for the `no-rust` `no-pure` implementation as it use a very minimal nodemap implementation that unconditionnaly rewrite the nodemap "all the time".
1273 This isn't the case for the `no-rust` `no-pure` implementation as it use a very minimal nodemap implementation that unconditionnaly rewrite the nodemap "all the time".
1276
1274
1277 #if no-rust no-pure
1275 #if no-rust no-pure
1278 $ diff -u server-metadata-2.txt client-metadata-2.txt
1276 $ diff -u server-metadata-2.txt client-metadata-2.txt
1279 --- server-metadata-2.txt * (glob)
1277 --- server-metadata-2.txt * (glob)
1280 +++ client-metadata-2.txt * (glob)
1278 +++ client-metadata-2.txt * (glob)
1281 @@ -1,4 +1,4 @@
1279 @@ -1,4 +1,4 @@
1282 -uid: * (glob)
1280 -uid: * (glob)
1283 +uid: * (glob)
1281 +uid: * (glob)
1284 tip-rev: 5006
1282 tip-rev: 5006
1285 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1283 tip-node: ed2ec1eef9aa2a0ec5057c51483bc148d03e810b
1286 data-length: 121152
1284 data-length: 121152
1287 [1]
1285 [1]
1288 #else
1286 #else
1289 $ diff -u server-metadata-2.txt client-metadata-2.txt
1287 $ diff -u server-metadata-2.txt client-metadata-2.txt
1290 #endif
1288 #endif
1291
1289
1292 Clean up after the test
1290 Clean up after the test
1293
1291
1294 $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
1292 $ rm -f $HG_TEST_STREAM_WALKED_FILE_1
1295 $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
1293 $ rm -f $HG_TEST_STREAM_WALKED_FILE_2
1296 $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
1294 $ rm -f $HG_TEST_STREAM_WALKED_FILE_3
1297
1295
General Comments 0
You need to be logged in to leave comments. Login now