share: make it possible to control the working copy format variant...

Author: marmoute
Changeset: r49297:bf2738e0 (branch: default)
@@ -1,3898 +1,3918 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
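

# Usage sketch (illustrative only): the filecache decorators above are
# applied to properties of ``localrepository`` further down in this
# module, along these lines:
#
#     @repofilecache(b'bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# The decorator records its backing file in ``_cachedfiles`` so the
# cached value can be invalidated when that file changes on disk.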


class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()
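
# For illustration (a sketch, not part of this changeset): an extension
# would typically register such a callback from its ``uisetup()`` hook:
#
#     def featuresetup(ui, supported):
#         # advertise a hypothetical extension-specific requirement
#         supported.add(b'exp-my-extension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only callbacks defined by currently loaded extensions are invoked (see
# gathersupportedrequirements() below).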


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the require file present at root of this vfs
    and return a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements
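

# For illustration (entries vary with repository age, configuration and
# Mercurial version): a typical modern ``.hg/requires`` lists entries such
# as ``dotencode``, ``fncache``, ``generaldelta``, ``revlogv1``,
# ``sparserevlog`` and ``store``, plus ``share-safe`` for share-safe
# repositories.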


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
    # is not present, refer checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates current repository
    # is a share and store exists in path mentioned in `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produces types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we has to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret
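

# For illustration (a sketch, not part of this changeset): an extension
# wanting to pull per-repo configuration from an additional source could
# wrap loadhgrc(), e.g.:
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
#         try:
#             # ``hgrc-extra`` is a hypothetical file name used only here
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)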
886
887
887
888
888 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
889 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
889 """Perform additional actions after .hg/hgrc is loaded.
890 """Perform additional actions after .hg/hgrc is loaded.
890
891
891 This function is called during repository loading immediately after
892 This function is called during repository loading immediately after
892 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
893 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
893
894
894 The function can be used to validate configs, automatically add
895 The function can be used to validate configs, automatically add
895 options (including extensions) based on requirements, etc.
896 options (including extensions) based on requirements, etc.
896 """
897 """
897
898
898 # Map of requirements to list of extensions to load automatically when
899 # Map of requirements to list of extensions to load automatically when
899 # requirement is present.
900 # requirement is present.
900 autoextensions = {
901 autoextensions = {
901 b'git': [b'git'],
902 b'git': [b'git'],
902 b'largefiles': [b'largefiles'],
903 b'largefiles': [b'largefiles'],
903 b'lfs': [b'lfs'],
904 b'lfs': [b'lfs'],
904 }
905 }
905
906
906 for requirement, names in sorted(autoextensions.items()):
907 for requirement, names in sorted(autoextensions.items()):
907 if requirement not in requirements:
908 if requirement not in requirements:
908 continue
909 continue
909
910
910 for name in names:
911 for name in names:
911 if not ui.hasconfig(b'extensions', name):
912 if not ui.hasconfig(b'extensions', name):
912 ui.setconfig(b'extensions', name, b'', source=b'autoload')
913 ui.setconfig(b'extensions', name, b'', source=b'autoload')
913
914
914
915
915 def gathersupportedrequirements(ui):
916 def gathersupportedrequirements(ui):
916 """Determine the complete set of recognized requirements."""
917 """Determine the complete set of recognized requirements."""
917 # Start with all requirements supported by this file.
918 # Start with all requirements supported by this file.
918 supported = set(localrepository._basesupported)
919 supported = set(localrepository._basesupported)
919
920
920 # Execute ``featuresetupfuncs`` entries if they belong to an extension
921 # Execute ``featuresetupfuncs`` entries if they belong to an extension
921 # relevant to this ui instance.
922 # relevant to this ui instance.
922 modules = {m.__name__ for n, m in extensions.extensions(ui)}
923 modules = {m.__name__ for n, m in extensions.extensions(ui)}
923
924
924 for fn in featuresetupfuncs:
925 for fn in featuresetupfuncs:
925 if fn.__module__ in modules:
926 if fn.__module__ in modules:
926 fn(ui, supported)
927 fn(ui, supported)
927
928
928 # Add derived requirements from registered compression engines.
929 # Add derived requirements from registered compression engines.
929 for name in util.compengines:
930 for name in util.compengines:
930 engine = util.compengines[name]
931 engine = util.compengines[name]
931 if engine.available() and engine.revlogheader():
932 if engine.available() and engine.revlogheader():
932 supported.add(b'exp-compression-%s' % name)
933 supported.add(b'exp-compression-%s' % name)
933 if engine.name() == b'zstd':
934 if engine.name() == b'zstd':
934 supported.add(b'revlog-compression-zstd')
935 supported.add(b'revlog-compression-zstd')
935
936
936 return supported
937 return supported
937
938
938
939
939 def ensurerequirementsrecognized(requirements, supported):
940 def ensurerequirementsrecognized(requirements, supported):
940 """Validate that a set of local requirements is recognized.
941 """Validate that a set of local requirements is recognized.
941
942
942 Receives a set of requirements. Raises an ``error.RepoError`` if there
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
943 exists any requirement in that set that currently loaded code doesn't
944 exists any requirement in that set that currently loaded code doesn't
944 recognize.
945 recognize.
945
946
946 Returns nothing on success.
947 Returns nothing on success.
947 """
948 """
948 missing = set()
949 missing = set()
949
950
950 for requirement in requirements:
951 for requirement in requirements:
951 if requirement in supported:
952 if requirement in supported:
952 continue
953 continue
953
954
954 if not requirement or not requirement[0:1].isalnum():
955 if not requirement or not requirement[0:1].isalnum():
955 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
956
957
957 missing.add(requirement)
958 missing.add(requirement)
958
959
959 if missing:
960 if missing:
960 raise error.RequirementError(
961 raise error.RequirementError(
961 _(b'repository requires features unknown to this Mercurial: %s')
962 _(b'repository requires features unknown to this Mercurial: %s')
962 % b' '.join(sorted(missing)),
963 % b' '.join(sorted(missing)),
963 hint=_(
964 hint=_(
964 b'see https://mercurial-scm.org/wiki/MissingRequirement '
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
965 b'for more information'
966 b'for more information'
966 ),
967 ),
967 )
968 )
968
969
969
970
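``ensurerequirementsrecognized()`` can be exercised on its own, outside of repository loading; a self-contained sketch with example requirement names:

```python
# Sketch: validate a requirements set against what this Mercurial build knows.
from mercurial import localrepo, ui as uimod

ui = uimod.ui.load()
supported = localrepo.gathersupportedrequirements(ui)
localrepo.ensurerequirementsrecognized(
    {b'revlogv1', b'store', b'fncache', b'dotencode'}, supported
)
# An unrecognized entry such as b'my-future-format' would instead raise
# error.RequirementError, pointing at the MissingRequirement wiki page.
```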
970 def ensurerequirementscompatible(ui, requirements):
971 def ensurerequirementscompatible(ui, requirements):
971 """Validates that a set of recognized requirements is mutually compatible.
972 """Validates that a set of recognized requirements is mutually compatible.
972
973
973 Some requirements may not be compatible with others or require
974 Some requirements may not be compatible with others or require
974 config options that aren't enabled. This function is called during
975 config options that aren't enabled. This function is called during
975 repository opening to ensure that the set of requirements needed
976 repository opening to ensure that the set of requirements needed
976 to open a repository is sane and compatible with config options.
977 to open a repository is sane and compatible with config options.
977
978
978 Extensions can monkeypatch this function to perform additional
979 Extensions can monkeypatch this function to perform additional
979 checking.
980 checking.
980
981
981 ``error.RepoError`` should be raised on failure.
982 ``error.RepoError`` should be raised on failure.
982 """
983 """
983 if (
984 if (
984 requirementsmod.SPARSE_REQUIREMENT in requirements
985 requirementsmod.SPARSE_REQUIREMENT in requirements
985 and not sparse.enabled
986 and not sparse.enabled
986 ):
987 ):
987 raise error.RepoError(
988 raise error.RepoError(
988 _(
989 _(
989 b'repository is using sparse feature but '
990 b'repository is using sparse feature but '
990 b'sparse is not enabled; enable the '
991 b'sparse is not enabled; enable the '
991 b'"sparse" extensions to access'
992 b'"sparse" extensions to access'
992 )
993 )
993 )
994 )
994
995
995
996
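Since the docstring of ``ensurerequirementscompatible()`` explicitly allows monkeypatching, here is a hedged sketch of what such an extra check could look like (requirement and config names are invented):

```python
# Hypothetical compatibility check layered on top of
# ensurerequirementscompatible() by an extension.
from mercurial import error, extensions, localrepo


def _compatcheck(orig, ui, requirements):
    orig(ui, requirements)
    if b'exp-myfeature' in requirements and not ui.configbool(
        b'myext', b'enabled', False
    ):
        raise error.RepoError(
            b'repository uses exp-myfeature but myext.enabled is not set'
        )


def uisetup(ui):
    extensions.wrapfunction(
        localrepo, 'ensurerequirementscompatible', _compatcheck
    )
```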
996 def makestore(requirements, path, vfstype):
997 def makestore(requirements, path, vfstype):
997 """Construct a storage object for a repository."""
998 """Construct a storage object for a repository."""
998 if requirementsmod.STORE_REQUIREMENT in requirements:
999 if requirementsmod.STORE_REQUIREMENT in requirements:
999 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1000 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1001 return storemod.fncachestore(path, vfstype, dotencode)
1002 return storemod.fncachestore(path, vfstype, dotencode)
1002
1003
1003 return storemod.encodedstore(path, vfstype)
1004 return storemod.encodedstore(path, vfstype)
1004
1005
1005 return storemod.basicstore(path, vfstype)
1006 return storemod.basicstore(path, vfstype)
1006
1007
1007
1008
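For a typical modern repository, where the store, fncache and dotencode requirements are all present, ``makestore()`` selects the fncache store. A small sketch with a placeholder path:

```python
# Sketch only: exercising makestore() with an illustrative path.
from mercurial import localrepo, requirements as requirementsmod
from mercurial import store as storemod, vfs as vfsmod

reqs = {
    requirementsmod.STORE_REQUIREMENT,
    requirementsmod.FNCACHE_REQUIREMENT,
    requirementsmod.DOTENCODE_REQUIREMENT,
}
s = localrepo.makestore(reqs, b'/tmp/example-repo/.hg', vfsmod.vfs)
assert isinstance(s, storemod.fncachestore)
```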
1008 def resolvestorevfsoptions(ui, requirements, features):
1009 def resolvestorevfsoptions(ui, requirements, features):
1009 """Resolve the options to pass to the store vfs opener.
1010 """Resolve the options to pass to the store vfs opener.
1010
1011
1011 The returned dict is used to influence behavior of the storage layer.
1012 The returned dict is used to influence behavior of the storage layer.
1012 """
1013 """
1013 options = {}
1014 options = {}
1014
1015
1015 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1016 options[b'treemanifest'] = True
1017 options[b'treemanifest'] = True
1017
1018
1018 # experimental config: format.manifestcachesize
1019 # experimental config: format.manifestcachesize
1019 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1020 if manifestcachesize is not None:
1021 if manifestcachesize is not None:
1021 options[b'manifestcachesize'] = manifestcachesize
1022 options[b'manifestcachesize'] = manifestcachesize
1022
1023
1023 # In the absence of another requirement superseding a revlog-related
1024 # In the absence of another requirement superseding a revlog-related
1024 # requirement, we have to assume the repo is using revlog version 0.
1025 # requirement, we have to assume the repo is using revlog version 0.
1025 # This revlog format is super old and we don't bother trying to parse
1026 # This revlog format is super old and we don't bother trying to parse
1026 # opener options for it because those options wouldn't do anything
1027 # opener options for it because those options wouldn't do anything
1027 # meaningful on such old repos.
1028 # meaningful on such old repos.
1028 if (
1029 if (
1029 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1030 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1031 ):
1032 ):
1032 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1033 else: # explicitly mark repo as using revlogv0
1034 else: # explicitly mark repo as using revlogv0
1034 options[b'revlogv0'] = True
1035 options[b'revlogv0'] = True
1035
1036
1036 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1037 options[b'copies-storage'] = b'changeset-sidedata'
1038 options[b'copies-storage'] = b'changeset-sidedata'
1038 else:
1039 else:
1039 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1040 copiesextramode = (b'changeset-only', b'compatibility')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1041 if writecopiesto in copiesextramode:
1042 if writecopiesto in copiesextramode:
1042 options[b'copies-storage'] = b'extra'
1043 options[b'copies-storage'] = b'extra'
1043
1044
1044 return options
1045 return options
1045
1046
1046
1047
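The dict returned by ``resolvestorevfsoptions()`` can be inspected without an on-disk repository; a rough sketch for a bare revlogv1 requirement set (exact keys depend on configuration defaults):

```python
# Sketch: options produced for a plain revlogv1 repository.
from mercurial import localrepo, requirements as requirementsmod
from mercurial import ui as uimod

ui = uimod.ui.load()
opts = localrepo.resolvestorevfsoptions(
    ui, {requirementsmod.REVLOGV1_REQUIREMENT}, set()
)
print(sorted(opts))
# typically includes b'revlogv1', b'lazydelta', b'sparse-revlog',
# b'with-sparse-read', b'zlib.level', b'zstd.level', ...
```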
1047 def resolverevlogstorevfsoptions(ui, requirements, features):
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1048 """Resolve opener options specific to revlogs."""
1049 """Resolve opener options specific to revlogs."""
1049
1050
1050 options = {}
1051 options = {}
1051 options[b'flagprocessors'] = {}
1052 options[b'flagprocessors'] = {}
1052
1053
1053 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1054 options[b'revlogv1'] = True
1055 options[b'revlogv1'] = True
1055 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1056 options[b'revlogv2'] = True
1057 options[b'revlogv2'] = True
1057 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1058 options[b'changelogv2'] = True
1059 options[b'changelogv2'] = True
1059
1060
1060 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1061 options[b'generaldelta'] = True
1062 options[b'generaldelta'] = True
1062
1063
1063 # experimental config: format.chunkcachesize
1064 # experimental config: format.chunkcachesize
1064 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1065 if chunkcachesize is not None:
1066 if chunkcachesize is not None:
1066 options[b'chunkcachesize'] = chunkcachesize
1067 options[b'chunkcachesize'] = chunkcachesize
1067
1068
1068 deltabothparents = ui.configbool(
1069 deltabothparents = ui.configbool(
1069 b'storage', b'revlog.optimize-delta-parent-choice'
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1070 )
1071 )
1071 options[b'deltabothparents'] = deltabothparents
1072 options[b'deltabothparents'] = deltabothparents
1072
1073
1073 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1074 options[b'issue6528.fix-incoming'] = issue6528
1075 options[b'issue6528.fix-incoming'] = issue6528
1075
1076
1076 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1077 lazydeltabase = False
1078 lazydeltabase = False
1078 if lazydelta:
1079 if lazydelta:
1079 lazydeltabase = ui.configbool(
1080 lazydeltabase = ui.configbool(
1080 b'storage', b'revlog.reuse-external-delta-parent'
1081 b'storage', b'revlog.reuse-external-delta-parent'
1081 )
1082 )
1082 if lazydeltabase is None:
1083 if lazydeltabase is None:
1083 lazydeltabase = not scmutil.gddeltaconfig(ui)
1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1084 options[b'lazydelta'] = lazydelta
1085 options[b'lazydelta'] = lazydelta
1085 options[b'lazydeltabase'] = lazydeltabase
1086 options[b'lazydeltabase'] = lazydeltabase
1086
1087
1087 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1088 if 0 <= chainspan:
1089 if 0 <= chainspan:
1089 options[b'maxdeltachainspan'] = chainspan
1090 options[b'maxdeltachainspan'] = chainspan
1090
1091
1091 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1092 if mmapindexthreshold is not None:
1093 if mmapindexthreshold is not None:
1093 options[b'mmapindexthreshold'] = mmapindexthreshold
1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1094
1095
1095 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1096 srdensitythres = float(
1097 srdensitythres = float(
1097 ui.config(b'experimental', b'sparse-read.density-threshold')
1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1098 )
1099 )
1099 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1100 options[b'with-sparse-read'] = withsparseread
1101 options[b'with-sparse-read'] = withsparseread
1101 options[b'sparse-read-density-threshold'] = srdensitythres
1102 options[b'sparse-read-density-threshold'] = srdensitythres
1102 options[b'sparse-read-min-gap-size'] = srmingapsize
1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1103
1104
1104 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1105 options[b'sparse-revlog'] = sparserevlog
1106 options[b'sparse-revlog'] = sparserevlog
1106 if sparserevlog:
1107 if sparserevlog:
1107 options[b'generaldelta'] = True
1108 options[b'generaldelta'] = True
1108
1109
1109 maxchainlen = None
1110 maxchainlen = None
1110 if sparserevlog:
1111 if sparserevlog:
1111 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1112 # experimental config: format.maxchainlen
1113 # experimental config: format.maxchainlen
1113 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1114 if maxchainlen is not None:
1115 if maxchainlen is not None:
1115 options[b'maxchainlen'] = maxchainlen
1116 options[b'maxchainlen'] = maxchainlen
1116
1117
1117 for r in requirements:
1118 for r in requirements:
1118 # we allow multiple compression engine requirements to co-exist because,
1119 # we allow multiple compression engine requirements to co-exist because,
1119 # strictly speaking, revlog seems to support mixed compression styles.
1120 # strictly speaking, revlog seems to support mixed compression styles.
1120 #
1121 #
1121 # The compression used for new entries will be "the last one"
1122 # The compression used for new entries will be "the last one"
1122 prefix = r.startswith
1123 prefix = r.startswith
1123 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1124 options[b'compengine'] = r.split(b'-', 2)[2]
1125 options[b'compengine'] = r.split(b'-', 2)[2]
1125
1126
1126 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1127 if options[b'zlib.level'] is not None:
1128 if options[b'zlib.level'] is not None:
1128 if not (0 <= options[b'zlib.level'] <= 9):
1129 if not (0 <= options[b'zlib.level'] <= 9):
1129 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1130 raise error.Abort(msg % options[b'zlib.level'])
1131 raise error.Abort(msg % options[b'zlib.level'])
1131 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1132 if options[b'zstd.level'] is not None:
1133 if options[b'zstd.level'] is not None:
1133 if not (0 <= options[b'zstd.level'] <= 22):
1134 if not (0 <= options[b'zstd.level'] <= 22):
1134 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1135 raise error.Abort(msg % options[b'zstd.level'])
1136 raise error.Abort(msg % options[b'zstd.level'])
1136
1137
1137 if requirementsmod.NARROW_REQUIREMENT in requirements:
1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1138 options[b'enableellipsis'] = True
1139 options[b'enableellipsis'] = True
1139
1140
1140 if ui.configbool(b'experimental', b'rust.index'):
1141 if ui.configbool(b'experimental', b'rust.index'):
1141 options[b'rust.index'] = True
1142 options[b'rust.index'] = True
1142 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1143 slow_path = ui.config(
1144 slow_path = ui.config(
1144 b'storage', b'revlog.persistent-nodemap.slow-path'
1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1145 )
1146 )
1146 if slow_path not in (b'allow', b'warn', b'abort'):
1147 if slow_path not in (b'allow', b'warn', b'abort'):
1147 default = ui.config_default(
1148 default = ui.config_default(
1148 b'storage', b'revlog.persistent-nodemap.slow-path'
1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1149 )
1150 )
1150 msg = _(
1151 msg = _(
1151 b'unknown value for config '
1152 b'unknown value for config '
1152 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1153 )
1154 )
1154 ui.warn(msg % slow_path)
1155 ui.warn(msg % slow_path)
1155 if not ui.quiet:
1156 if not ui.quiet:
1156 ui.warn(_(b'falling back to default value: %s\n') % default)
1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1157 slow_path = default
1158 slow_path = default
1158
1159
1159 msg = _(
1160 msg = _(
1160 b"accessing `persistent-nodemap` repository without associated "
1161 b"accessing `persistent-nodemap` repository without associated "
1161 b"fast implementation."
1162 b"fast implementation."
1162 )
1163 )
1163 hint = _(
1164 hint = _(
1164 b"check `hg help config.format.use-persistent-nodemap` "
1165 b"check `hg help config.format.use-persistent-nodemap` "
1165 b"for details"
1166 b"for details"
1166 )
1167 )
1167 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1168 if slow_path == b'warn':
1169 if slow_path == b'warn':
1169 msg = b"warning: " + msg + b'\n'
1170 msg = b"warning: " + msg + b'\n'
1170 ui.warn(msg)
1171 ui.warn(msg)
1171 if not ui.quiet:
1172 if not ui.quiet:
1172 hint = b'(' + hint + b')\n'
1173 hint = b'(' + hint + b')\n'
1173 ui.warn(hint)
1174 ui.warn(hint)
1174 if slow_path == b'abort':
1175 if slow_path == b'abort':
1175 raise error.Abort(msg, hint=hint)
1176 raise error.Abort(msg, hint=hint)
1176 options[b'persistent-nodemap'] = True
1177 options[b'persistent-nodemap'] = True
1177 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1178 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1179 if slow_path not in (b'allow', b'warn', b'abort'):
1180 if slow_path not in (b'allow', b'warn', b'abort'):
1180 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1181 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1182 ui.warn(msg % slow_path)
1183 ui.warn(msg % slow_path)
1183 if not ui.quiet:
1184 if not ui.quiet:
1184 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 slow_path = default
1186 slow_path = default
1186
1187
1187 msg = _(
1188 msg = _(
1188 b"accessing `dirstate-v2` repository without associated "
1189 b"accessing `dirstate-v2` repository without associated "
1189 b"fast implementation."
1190 b"fast implementation."
1190 )
1191 )
1191 hint = _(
1192 hint = _(
1192 b"check `hg help config.format.exp-rc-dirstate-v2` " b"for details"
1193 b"check `hg help config.format.exp-rc-dirstate-v2` " b"for details"
1193 )
1194 )
1194 if not dirstate.HAS_FAST_DIRSTATE_V2:
1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1195 if slow_path == b'warn':
1196 if slow_path == b'warn':
1196 msg = b"warning: " + msg + b'\n'
1197 msg = b"warning: " + msg + b'\n'
1197 ui.warn(msg)
1198 ui.warn(msg)
1198 if not ui.quiet:
1199 if not ui.quiet:
1199 hint = b'(' + hint + b')\n'
1200 hint = b'(' + hint + b')\n'
1200 ui.warn(hint)
1201 ui.warn(hint)
1201 if slow_path == b'abort':
1202 if slow_path == b'abort':
1202 raise error.Abort(msg, hint=hint)
1203 raise error.Abort(msg, hint=hint)
1203 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1204 options[b'persistent-nodemap.mmap'] = True
1205 options[b'persistent-nodemap.mmap'] = True
1205 if ui.configbool(b'devel', b'persistent-nodemap'):
1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1206 options[b'devel-force-nodemap'] = True
1207 options[b'devel-force-nodemap'] = True
1207
1208
1208 return options
1209 return options
1209
1210
1210
1211
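The requirement-to-compression-engine mapping in the loop inside ``resolverevlogstorevfsoptions()`` above boils down to a prefix check plus a split; a tiny standalone illustration:

```python
# Standalone illustration of the compression-requirement parsing above.
for req in (b'revlog-compression-zstd', b'exp-compression-none'):
    if req.startswith(b'revlog-compression-') or req.startswith(b'exp-compression-'):
        print(req.split(b'-', 2)[2])  # -> b'zstd', then b'none'
```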
1211 def makemain(**kwargs):
1212 def makemain(**kwargs):
1212 """Produce a type conforming to ``ilocalrepositorymain``."""
1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1213 return localrepository
1214 return localrepository
1214
1215
1215
1216
1216 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1217 class revlogfilestorage(object):
1218 class revlogfilestorage(object):
1218 """File storage when using revlogs."""
1219 """File storage when using revlogs."""
1219
1220
1220 def file(self, path):
1221 def file(self, path):
1221 if path.startswith(b'/'):
1222 if path.startswith(b'/'):
1222 path = path[1:]
1223 path = path[1:]
1223
1224
1224 return filelog.filelog(self.svfs, path)
1225 return filelog.filelog(self.svfs, path)
1225
1226
1226
1227
1227 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1228 class revlognarrowfilestorage(object):
1229 class revlognarrowfilestorage(object):
1229 """File storage when using revlogs and narrow files."""
1230 """File storage when using revlogs and narrow files."""
1230
1231
1231 def file(self, path):
1232 def file(self, path):
1232 if path.startswith(b'/'):
1233 if path.startswith(b'/'):
1233 path = path[1:]
1234 path = path[1:]
1234
1235
1235 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1236
1237
1237
1238
1238 def makefilestorage(requirements, features, **kwargs):
1239 def makefilestorage(requirements, features, **kwargs):
1239 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1240 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1241 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1242
1243
1243 if requirementsmod.NARROW_REQUIREMENT in requirements:
1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1244 return revlognarrowfilestorage
1245 return revlognarrowfilestorage
1245 else:
1246 else:
1246 return revlogfilestorage
1247 return revlogfilestorage
1247
1248
1248
1249
1249 # List of repository interfaces and factory functions for them. Each
1250 # List of repository interfaces and factory functions for them. Each
1250 # will be called in order during ``makelocalrepository()`` to iteratively
1251 # will be called in order during ``makelocalrepository()`` to iteratively
1251 # derive the final type for a local repository instance. We capture the
1252 # derive the final type for a local repository instance. We capture the
1252 # function as a lambda so we don't hold a reference and the module-level
1253 # function as a lambda so we don't hold a reference and the module-level
1253 # functions can be wrapped.
1254 # functions can be wrapped.
1254 REPO_INTERFACES = [
1255 REPO_INTERFACES = [
1255 (repository.ilocalrepositorymain, lambda: makemain),
1256 (repository.ilocalrepositorymain, lambda: makemain),
1256 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1257 ]
1258 ]
1258
1259
1259
1260
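A rough sketch, not the actual ``makelocalrepository()`` code, of how these factories combine into the final repository type:

```python
# Rough sketch: deriving a repository class from the REPO_INTERFACES factories.
from mercurial.localrepo import REPO_INTERFACES

bases = []
for iface, factoryfn in REPO_INTERFACES:
    factory = factoryfn()  # unwrap the lambda so wrapped functions are honored
    bases.append(factory(requirements=set(), features=set()))
# e.g. (localrepository, revlogfilestorage) for an empty requirement set
derived = type('derivedrepo', tuple(bases), {})
```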
1260 @interfaceutil.implementer(repository.ilocalrepositorymain)
1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1261 class localrepository(object):
1262 class localrepository(object):
1262 """Main class for representing local repositories.
1263 """Main class for representing local repositories.
1263
1264
1264 All local repositories are instances of this class.
1265 All local repositories are instances of this class.
1265
1266
1266 Constructed on its own, instances of this class are not usable as
1267 Constructed on its own, instances of this class are not usable as
1267 repository objects. To obtain a usable repository object, call
1268 repository objects. To obtain a usable repository object, call
1268 ``hg.repository()``, ``localrepo.instance()``, or
1269 ``hg.repository()``, ``localrepo.instance()``, or
1269 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1270 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1270 ``instance()`` adds support for creating new repositories.
1271 ``instance()`` adds support for creating new repositories.
1271 ``hg.repository()`` adds more extension integration, including calling
1272 ``hg.repository()`` adds more extension integration, including calling
1272 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1273 used.
1274 used.
1274 """
1275 """
1275
1276
1276 # obsolete experimental requirements:
1277 # obsolete experimental requirements:
1277 # - manifestv2: An experimental new manifest format that allowed
1278 # - manifestv2: An experimental new manifest format that allowed
1278 # for stem compression of long paths. Experiment ended up not
1279 # for stem compression of long paths. Experiment ended up not
1279 # being successful (repository sizes went up due to worse delta
1280 # being successful (repository sizes went up due to worse delta
1280 # chains), and the code was deleted in 4.6.
1281 # chains), and the code was deleted in 4.6.
1281 supportedformats = {
1282 supportedformats = {
1282 requirementsmod.REVLOGV1_REQUIREMENT,
1283 requirementsmod.REVLOGV1_REQUIREMENT,
1283 requirementsmod.GENERALDELTA_REQUIREMENT,
1284 requirementsmod.GENERALDELTA_REQUIREMENT,
1284 requirementsmod.TREEMANIFEST_REQUIREMENT,
1285 requirementsmod.TREEMANIFEST_REQUIREMENT,
1285 requirementsmod.COPIESSDC_REQUIREMENT,
1286 requirementsmod.COPIESSDC_REQUIREMENT,
1286 requirementsmod.REVLOGV2_REQUIREMENT,
1287 requirementsmod.REVLOGV2_REQUIREMENT,
1287 requirementsmod.CHANGELOGV2_REQUIREMENT,
1288 requirementsmod.CHANGELOGV2_REQUIREMENT,
1288 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1289 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1289 requirementsmod.NODEMAP_REQUIREMENT,
1290 requirementsmod.NODEMAP_REQUIREMENT,
1290 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1291 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1291 requirementsmod.SHARESAFE_REQUIREMENT,
1292 requirementsmod.SHARESAFE_REQUIREMENT,
1292 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1293 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1293 }
1294 }
1294 _basesupported = supportedformats | {
1295 _basesupported = supportedformats | {
1295 requirementsmod.STORE_REQUIREMENT,
1296 requirementsmod.STORE_REQUIREMENT,
1296 requirementsmod.FNCACHE_REQUIREMENT,
1297 requirementsmod.FNCACHE_REQUIREMENT,
1297 requirementsmod.SHARED_REQUIREMENT,
1298 requirementsmod.SHARED_REQUIREMENT,
1298 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1299 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1299 requirementsmod.DOTENCODE_REQUIREMENT,
1300 requirementsmod.DOTENCODE_REQUIREMENT,
1300 requirementsmod.SPARSE_REQUIREMENT,
1301 requirementsmod.SPARSE_REQUIREMENT,
1301 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1302 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1302 }
1303 }
1303
1304
1304 # list of prefixes for files which can be written without 'wlock'
1305 # list of prefixes for files which can be written without 'wlock'
1305 # Extensions should extend this list when needed
1306 # Extensions should extend this list when needed
1306 _wlockfreeprefix = {
1307 _wlockfreeprefix = {
1307 # We might consider requiring 'wlock' for the next
1308 # We might consider requiring 'wlock' for the next
1308 # two, but pretty much all the existing code assumes
1309 # two, but pretty much all the existing code assumes
1309 # wlock is not needed, so we keep them excluded for
1310 # wlock is not needed, so we keep them excluded for
1310 # now.
1311 # now.
1311 b'hgrc',
1312 b'hgrc',
1312 b'requires',
1313 b'requires',
1313 # XXX cache is a complicated business; someone
1314 # XXX cache is a complicated business; someone
1314 # should investigate this in depth at some point
1315 # should investigate this in depth at some point
1315 b'cache/',
1316 b'cache/',
1317 # XXX shouldn't the dirstate be covered by the wlock?
1318 # XXX shouldn't the dirstate be covered by the wlock?
1317 b'dirstate',
1318 b'dirstate',
1318 # XXX bisect was still a bit too messy at the time
1319 # XXX bisect was still a bit too messy at the time
1319 # this changeset was introduced. Someone should fix
1320 # this changeset was introduced. Someone should fix
1320 # the remaining bit and drop this line
1321 # the remaining bit and drop this line
1321 b'bisect.state',
1322 b'bisect.state',
1322 }
1323 }
1323
1324
1324 def __init__(
1325 def __init__(
1325 self,
1326 self,
1326 baseui,
1327 baseui,
1327 ui,
1328 ui,
1328 origroot,
1329 origroot,
1329 wdirvfs,
1330 wdirvfs,
1330 hgvfs,
1331 hgvfs,
1331 requirements,
1332 requirements,
1332 supportedrequirements,
1333 supportedrequirements,
1333 sharedpath,
1334 sharedpath,
1334 store,
1335 store,
1335 cachevfs,
1336 cachevfs,
1336 wcachevfs,
1337 wcachevfs,
1337 features,
1338 features,
1338 intents=None,
1339 intents=None,
1339 ):
1340 ):
1340 """Create a new local repository instance.
1341 """Create a new local repository instance.
1341
1342
1342 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1343 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1343 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1344 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1344 object.
1345 object.
1345
1346
1346 Arguments:
1347 Arguments:
1347
1348
1348 baseui
1349 baseui
1349 ``ui.ui`` instance that ``ui`` argument was based off of.
1350 ``ui.ui`` instance that ``ui`` argument was based off of.
1350
1351
1351 ui
1352 ui
1352 ``ui.ui`` instance for use by the repository.
1353 ``ui.ui`` instance for use by the repository.
1353
1354
1354 origroot
1355 origroot
1355 ``bytes`` path to working directory root of this repository.
1356 ``bytes`` path to working directory root of this repository.
1356
1357
1357 wdirvfs
1358 wdirvfs
1358 ``vfs.vfs`` rooted at the working directory.
1359 ``vfs.vfs`` rooted at the working directory.
1359
1360
1360 hgvfs
1361 hgvfs
1361 ``vfs.vfs`` rooted at .hg/
1362 ``vfs.vfs`` rooted at .hg/
1362
1363
1363 requirements
1364 requirements
1364 ``set`` of bytestrings representing repository opening requirements.
1365 ``set`` of bytestrings representing repository opening requirements.
1365
1366
1366 supportedrequirements
1367 supportedrequirements
1367 ``set`` of bytestrings representing repository requirements that we
1368 ``set`` of bytestrings representing repository requirements that we
1368 know how to open. May be a superset of ``requirements``.
1369 know how to open. May be a superset of ``requirements``.
1369
1370
1370 sharedpath
1371 sharedpath
1371 ``bytes`` defining the path to the storage base directory. Points to a
1372 ``bytes`` defining the path to the storage base directory. Points to a
1372 ``.hg/`` directory somewhere.
1373 ``.hg/`` directory somewhere.
1373
1374
1374 store
1375 store
1375 ``store.basicstore`` (or derived) instance providing access to
1376 ``store.basicstore`` (or derived) instance providing access to
1376 versioned storage.
1377 versioned storage.
1377
1378
1378 cachevfs
1379 cachevfs
1379 ``vfs.vfs`` used for cache files.
1380 ``vfs.vfs`` used for cache files.
1380
1381
1381 wcachevfs
1382 wcachevfs
1382 ``vfs.vfs`` used for cache files related to the working copy.
1383 ``vfs.vfs`` used for cache files related to the working copy.
1383
1384
1384 features
1385 features
1385 ``set`` of bytestrings defining features/capabilities of this
1386 ``set`` of bytestrings defining features/capabilities of this
1386 instance.
1387 instance.
1387
1388
1388 intents
1389 intents
1389 ``set`` of system strings indicating what this repo will be used
1390 ``set`` of system strings indicating what this repo will be used
1390 for.
1391 for.
1391 """
1392 """
1392 self.baseui = baseui
1393 self.baseui = baseui
1393 self.ui = ui
1394 self.ui = ui
1394 self.origroot = origroot
1395 self.origroot = origroot
1395 # vfs rooted at working directory.
1396 # vfs rooted at working directory.
1396 self.wvfs = wdirvfs
1397 self.wvfs = wdirvfs
1397 self.root = wdirvfs.base
1398 self.root = wdirvfs.base
1398 # vfs rooted at .hg/. Used to access most non-store paths.
1399 # vfs rooted at .hg/. Used to access most non-store paths.
1399 self.vfs = hgvfs
1400 self.vfs = hgvfs
1400 self.path = hgvfs.base
1401 self.path = hgvfs.base
1401 self.requirements = requirements
1402 self.requirements = requirements
1402 self.nodeconstants = sha1nodeconstants
1403 self.nodeconstants = sha1nodeconstants
1403 self.nullid = self.nodeconstants.nullid
1404 self.nullid = self.nodeconstants.nullid
1404 self.supported = supportedrequirements
1405 self.supported = supportedrequirements
1405 self.sharedpath = sharedpath
1406 self.sharedpath = sharedpath
1406 self.store = store
1407 self.store = store
1407 self.cachevfs = cachevfs
1408 self.cachevfs = cachevfs
1408 self.wcachevfs = wcachevfs
1409 self.wcachevfs = wcachevfs
1409 self.features = features
1410 self.features = features
1410
1411
1411 self.filtername = None
1412 self.filtername = None
1412
1413
1413 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1414 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1414 b'devel', b'check-locks'
1415 b'devel', b'check-locks'
1415 ):
1416 ):
1416 self.vfs.audit = self._getvfsward(self.vfs.audit)
1417 self.vfs.audit = self._getvfsward(self.vfs.audit)
1417 # A list of callback to shape the phase if no data were found.
1418 # A list of callback to shape the phase if no data were found.
1418 # Callback are in the form: func(repo, roots) --> processed root.
1419 # Callback are in the form: func(repo, roots) --> processed root.
1419 # This list it to be filled by extension during repo setup
1420 # This list it to be filled by extension during repo setup
1420 self._phasedefaults = []
1421 self._phasedefaults = []
1421
1422
1422 color.setup(self.ui)
1423 color.setup(self.ui)
1423
1424
1424 self.spath = self.store.path
1425 self.spath = self.store.path
1425 self.svfs = self.store.vfs
1426 self.svfs = self.store.vfs
1426 self.sjoin = self.store.join
1427 self.sjoin = self.store.join
1427 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1428 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1428 b'devel', b'check-locks'
1429 b'devel', b'check-locks'
1429 ):
1430 ):
1430 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1431 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1431 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1432 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1432 else: # standard vfs
1433 else: # standard vfs
1433 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1434 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1434
1435
1435 self._dirstatevalidatewarned = False
1436 self._dirstatevalidatewarned = False
1436
1437
1437 self._branchcaches = branchmap.BranchMapCache()
1438 self._branchcaches = branchmap.BranchMapCache()
1438 self._revbranchcache = None
1439 self._revbranchcache = None
1439 self._filterpats = {}
1440 self._filterpats = {}
1440 self._datafilters = {}
1441 self._datafilters = {}
1441 self._transref = self._lockref = self._wlockref = None
1442 self._transref = self._lockref = self._wlockref = None
1442
1443
1443 # A cache for various files under .hg/ that tracks file changes,
1444 # A cache for various files under .hg/ that tracks file changes,
1444 # (used by the filecache decorator)
1445 # (used by the filecache decorator)
1445 #
1446 #
1446 # Maps a property name to its util.filecacheentry
1447 # Maps a property name to its util.filecacheentry
1447 self._filecache = {}
1448 self._filecache = {}
1448
1449
1449 # hold sets of revision to be filtered
1450 # hold sets of revision to be filtered
1450 # should be cleared when something might have changed the filter value:
1451 # should be cleared when something might have changed the filter value:
1451 # - new changesets,
1452 # - new changesets,
1452 # - phase change,
1453 # - phase change,
1453 # - new obsolescence marker,
1454 # - new obsolescence marker,
1454 # - working directory parent change,
1455 # - working directory parent change,
1455 # - bookmark changes
1456 # - bookmark changes
1456 self.filteredrevcache = {}
1457 self.filteredrevcache = {}
1457
1458
1458 # post-dirstate-status hooks
1459 # post-dirstate-status hooks
1459 self._postdsstatus = []
1460 self._postdsstatus = []
1460
1461
1461 # generic mapping between names and nodes
1462 # generic mapping between names and nodes
1462 self.names = namespaces.namespaces()
1463 self.names = namespaces.namespaces()
1463
1464
1464 # Key to signature value.
1465 # Key to signature value.
1465 self._sparsesignaturecache = {}
1466 self._sparsesignaturecache = {}
1466 # Signature to cached matcher instance.
1467 # Signature to cached matcher instance.
1467 self._sparsematchercache = {}
1468 self._sparsematchercache = {}
1468
1469
1469 self._extrafilterid = repoview.extrafilter(ui)
1470 self._extrafilterid = repoview.extrafilter(ui)
1470
1471
1471 self.filecopiesmode = None
1472 self.filecopiesmode = None
1472 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1473 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1473 self.filecopiesmode = b'changeset-sidedata'
1474 self.filecopiesmode = b'changeset-sidedata'
1474
1475
1475 self._wanted_sidedata = set()
1476 self._wanted_sidedata = set()
1476 self._sidedata_computers = {}
1477 self._sidedata_computers = {}
1477 sidedatamod.set_sidedata_spec_for_repo(self)
1478 sidedatamod.set_sidedata_spec_for_repo(self)
1478
1479
1479 def _getvfsward(self, origfunc):
1480 def _getvfsward(self, origfunc):
1480 """build a ward for self.vfs"""
1481 """build a ward for self.vfs"""
1481 rref = weakref.ref(self)
1482 rref = weakref.ref(self)
1482
1483
1483 def checkvfs(path, mode=None):
1484 def checkvfs(path, mode=None):
1484 ret = origfunc(path, mode=mode)
1485 ret = origfunc(path, mode=mode)
1485 repo = rref()
1486 repo = rref()
1486 if (
1487 if (
1487 repo is None
1488 repo is None
1488 or not util.safehasattr(repo, b'_wlockref')
1489 or not util.safehasattr(repo, b'_wlockref')
1489 or not util.safehasattr(repo, b'_lockref')
1490 or not util.safehasattr(repo, b'_lockref')
1490 ):
1491 ):
1491 return
1492 return
1492 if mode in (None, b'r', b'rb'):
1493 if mode in (None, b'r', b'rb'):
1493 return
1494 return
1494 if path.startswith(repo.path):
1495 if path.startswith(repo.path):
1495 # truncate name relative to the repository (.hg)
1496 # truncate name relative to the repository (.hg)
1496 path = path[len(repo.path) + 1 :]
1497 path = path[len(repo.path) + 1 :]
1497 if path.startswith(b'cache/'):
1498 if path.startswith(b'cache/'):
1498 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1499 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1499 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1500 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1500 # path prefixes covered by 'lock'
1501 # path prefixes covered by 'lock'
1501 vfs_path_prefixes = (
1502 vfs_path_prefixes = (
1502 b'journal.',
1503 b'journal.',
1503 b'undo.',
1504 b'undo.',
1504 b'strip-backup/',
1505 b'strip-backup/',
1505 b'cache/',
1506 b'cache/',
1506 )
1507 )
1507 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1508 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1508 if repo._currentlock(repo._lockref) is None:
1509 if repo._currentlock(repo._lockref) is None:
1509 repo.ui.develwarn(
1510 repo.ui.develwarn(
1510 b'write with no lock: "%s"' % path,
1511 b'write with no lock: "%s"' % path,
1511 stacklevel=3,
1512 stacklevel=3,
1512 config=b'check-locks',
1513 config=b'check-locks',
1513 )
1514 )
1514 elif repo._currentlock(repo._wlockref) is None:
1515 elif repo._currentlock(repo._wlockref) is None:
1515 # rest of vfs files are covered by 'wlock'
1516 # rest of vfs files are covered by 'wlock'
1516 #
1517 #
1517 # exclude special files
1518 # exclude special files
1518 for prefix in self._wlockfreeprefix:
1519 for prefix in self._wlockfreeprefix:
1519 if path.startswith(prefix):
1520 if path.startswith(prefix):
1520 return
1521 return
1521 repo.ui.develwarn(
1522 repo.ui.develwarn(
1522 b'write with no wlock: "%s"' % path,
1523 b'write with no wlock: "%s"' % path,
1523 stacklevel=3,
1524 stacklevel=3,
1524 config=b'check-locks',
1525 config=b'check-locks',
1525 )
1526 )
1526 return ret
1527 return ret
1527
1528
1528 return checkvfs
1529 return checkvfs
1529
1530
1530 def _getsvfsward(self, origfunc):
1531 def _getsvfsward(self, origfunc):
1531 """build a ward for self.svfs"""
1532 """build a ward for self.svfs"""
1532 rref = weakref.ref(self)
1533 rref = weakref.ref(self)
1533
1534
1534 def checksvfs(path, mode=None):
1535 def checksvfs(path, mode=None):
1535 ret = origfunc(path, mode=mode)
1536 ret = origfunc(path, mode=mode)
1536 repo = rref()
1537 repo = rref()
1537 if repo is None or not util.safehasattr(repo, b'_lockref'):
1538 if repo is None or not util.safehasattr(repo, b'_lockref'):
1538 return
1539 return
1539 if mode in (None, b'r', b'rb'):
1540 if mode in (None, b'r', b'rb'):
1540 return
1541 return
1541 if path.startswith(repo.sharedpath):
1542 if path.startswith(repo.sharedpath):
1542 # truncate name relative to the repository (.hg)
1543 # truncate name relative to the repository (.hg)
1543 path = path[len(repo.sharedpath) + 1 :]
1544 path = path[len(repo.sharedpath) + 1 :]
1544 if repo._currentlock(repo._lockref) is None:
1545 if repo._currentlock(repo._lockref) is None:
1545 repo.ui.develwarn(
1546 repo.ui.develwarn(
1546 b'write with no lock: "%s"' % path, stacklevel=4
1547 b'write with no lock: "%s"' % path, stacklevel=4
1547 )
1548 )
1548 return ret
1549 return ret
1549
1550
1550 return checksvfs
1551 return checksvfs
1551
1552
1552 def close(self):
1553 def close(self):
1553 self._writecaches()
1554 self._writecaches()
1554
1555
1555 def _writecaches(self):
1556 def _writecaches(self):
1556 if self._revbranchcache:
1557 if self._revbranchcache:
1557 self._revbranchcache.write()
1558 self._revbranchcache.write()
1558
1559
1559 def _restrictcapabilities(self, caps):
1560 def _restrictcapabilities(self, caps):
1560 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1561 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1561 caps = set(caps)
1562 caps = set(caps)
1562 capsblob = bundle2.encodecaps(
1563 capsblob = bundle2.encodecaps(
1563 bundle2.getrepocaps(self, role=b'client')
1564 bundle2.getrepocaps(self, role=b'client')
1564 )
1565 )
1565 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1566 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1566 if self.ui.configbool(b'experimental', b'narrow'):
1567 if self.ui.configbool(b'experimental', b'narrow'):
1567 caps.add(wireprototypes.NARROWCAP)
1568 caps.add(wireprototypes.NARROWCAP)
1568 return caps
1569 return caps
1569
1570
1570 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1571 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1571 # self -> auditor -> self._checknested -> self
1572 # self -> auditor -> self._checknested -> self
1572
1573
1573 @property
1574 @property
1574 def auditor(self):
1575 def auditor(self):
1575 # This is only used by context.workingctx.match in order to
1576 # This is only used by context.workingctx.match in order to
1576 # detect files in subrepos.
1577 # detect files in subrepos.
1577 return pathutil.pathauditor(self.root, callback=self._checknested)
1578 return pathutil.pathauditor(self.root, callback=self._checknested)
1578
1579
1579 @property
1580 @property
1580 def nofsauditor(self):
1581 def nofsauditor(self):
1581 # This is only used by context.basectx.match in order to detect
1582 # This is only used by context.basectx.match in order to detect
1582 # files in subrepos.
1583 # files in subrepos.
1583 return pathutil.pathauditor(
1584 return pathutil.pathauditor(
1584 self.root, callback=self._checknested, realfs=False, cached=True
1585 self.root, callback=self._checknested, realfs=False, cached=True
1585 )
1586 )
1586
1587
1587 def _checknested(self, path):
1588 def _checknested(self, path):
1588 """Determine if path is a legal nested repository."""
1589 """Determine if path is a legal nested repository."""
1589 if not path.startswith(self.root):
1590 if not path.startswith(self.root):
1590 return False
1591 return False
1591 subpath = path[len(self.root) + 1 :]
1592 subpath = path[len(self.root) + 1 :]
1592 normsubpath = util.pconvert(subpath)
1593 normsubpath = util.pconvert(subpath)
1593
1594
1594 # XXX: Checking against the current working copy is wrong in
1595 # XXX: Checking against the current working copy is wrong in
1595 # the sense that it can reject things like
1596 # the sense that it can reject things like
1596 #
1597 #
1597 # $ hg cat -r 10 sub/x.txt
1598 # $ hg cat -r 10 sub/x.txt
1598 #
1599 #
1599 # if sub/ is no longer a subrepository in the working copy
1600 # if sub/ is no longer a subrepository in the working copy
1600 # parent revision.
1601 # parent revision.
1601 #
1602 #
1602 # However, it can of course also allow things that would have
1603 # However, it can of course also allow things that would have
1603 # been rejected before, such as the above cat command if sub/
1604 # been rejected before, such as the above cat command if sub/
1604 # is a subrepository now, but was a normal directory before.
1605 # is a subrepository now, but was a normal directory before.
1605 # The old path auditor would have rejected it by mistake since it
1606 # The old path auditor would have rejected it by mistake since it
1606 # panics when it sees sub/.hg/.
1607 # panics when it sees sub/.hg/.
1607 #
1608 #
1608 # All in all, checking against the working copy seems sensible
1609 # All in all, checking against the working copy seems sensible
1609 # since we want to prevent access to nested repositories on
1610 # since we want to prevent access to nested repositories on
1610 # the filesystem *now*.
1611 # the filesystem *now*.
1611 ctx = self[None]
1612 ctx = self[None]
1612 parts = util.splitpath(subpath)
1613 parts = util.splitpath(subpath)
1613 while parts:
1614 while parts:
1614 prefix = b'/'.join(parts)
1615 prefix = b'/'.join(parts)
1615 if prefix in ctx.substate:
1616 if prefix in ctx.substate:
1616 if prefix == normsubpath:
1617 if prefix == normsubpath:
1617 return True
1618 return True
1618 else:
1619 else:
1619 sub = ctx.sub(prefix)
1620 sub = ctx.sub(prefix)
1620 return sub.checknested(subpath[len(prefix) + 1 :])
1621 return sub.checknested(subpath[len(prefix) + 1 :])
1621 else:
1622 else:
1622 parts.pop()
1623 parts.pop()
1623 return False
1624 return False
1624
1625
1625 def peer(self):
1626 def peer(self):
1626 return localpeer(self) # not cached to avoid reference cycle
1627 return localpeer(self) # not cached to avoid reference cycle
1627
1628
1628 def unfiltered(self):
1629 def unfiltered(self):
1629 """Return unfiltered version of the repository
1630 """Return unfiltered version of the repository
1630
1631
1631 Intended to be overwritten by filtered repo."""
1632 Intended to be overwritten by filtered repo."""
1632 return self
1633 return self
1633
1634
1634 def filtered(self, name, visibilityexceptions=None):
1635 def filtered(self, name, visibilityexceptions=None):
1635 """Return a filtered version of a repository
1636 """Return a filtered version of a repository
1636
1637
1637 The `name` parameter is the identifier of the requested view. This
1638 The `name` parameter is the identifier of the requested view. This
1638 will return a repoview object set "exactly" to the specified view.
1639 will return a repoview object set "exactly" to the specified view.
1639
1640
1640 This function does not apply recursive filtering to a repository. For
1641 This function does not apply recursive filtering to a repository. For
1641 example calling `repo.filtered("served")` will return a repoview using
1642 example calling `repo.filtered("served")` will return a repoview using
1642 the "served" view, regardless of the initial view used by `repo`.
1643 the "served" view, regardless of the initial view used by `repo`.
1643
1644
1644 In other words, there is always only one level of `repoview` "filtering".
1645 In other words, there is always only one level of `repoview` "filtering".
1645 """
1646 """
1646 if self._extrafilterid is not None and b'%' not in name:
1647 if self._extrafilterid is not None and b'%' not in name:
1647 name = name + b'%' + self._extrafilterid
1648 name = name + b'%' + self._extrafilterid
1648
1649
1649 cls = repoview.newtype(self.unfiltered().__class__)
1650 cls = repoview.newtype(self.unfiltered().__class__)
1650 return cls(self, name, visibilityexceptions)
1651 return cls(self, name, visibilityexceptions)
1651
1652
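A small usage sketch of the ``filtered()`` view mechanism (the standard filter names include ``visible``, ``served``, ``immutable`` and ``base``; the path is a placeholder):

```python
# Sketch: comparing a filtered view with the unfiltered repository.
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # placeholder path
served = repo.filtered(b'served')  # hides secret and hidden changesets
full = served.unfiltered()         # always one level back to the raw repo
print(len(full) - len(served))     # revisions hidden by the 'served' view
```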
1652 @mixedrepostorecache(
1653 @mixedrepostorecache(
1653 (b'bookmarks', b'plain'),
1654 (b'bookmarks', b'plain'),
1654 (b'bookmarks.current', b'plain'),
1655 (b'bookmarks.current', b'plain'),
1655 (b'bookmarks', b''),
1656 (b'bookmarks', b''),
1656 (b'00changelog.i', b''),
1657 (b'00changelog.i', b''),
1657 )
1658 )
1658 def _bookmarks(self):
1659 def _bookmarks(self):
1659 # Since the multiple files involved in the transaction cannot be
1660 # Since the multiple files involved in the transaction cannot be
1660 # written atomically (with current repository format), there is a race
1661 # written atomically (with current repository format), there is a race
1661 # condition here.
1662 # condition here.
1662 #
1663 #
1663 # 1) changelog content A is read
1664 # 1) changelog content A is read
1664 # 2) outside transaction update changelog to content B
1665 # 2) outside transaction update changelog to content B
1665 # 3) outside transaction update bookmark file referring to content B
1666 # 3) outside transaction update bookmark file referring to content B
1666 # 4) bookmarks file content is read and filtered against changelog-A
1667 # 4) bookmarks file content is read and filtered against changelog-A
1667 #
1668 #
1668 # When this happens, bookmarks against nodes missing from A are dropped.
1669 # When this happens, bookmarks against nodes missing from A are dropped.
1669 #
1670 #
1670 # Having this happen during a read is not great, but it becomes worse
1671 # Having this happen during a read is not great, but it becomes worse
1671 # when it happens during a write because the bookmarks to the "unknown"
1672 # when it happens during a write because the bookmarks to the "unknown"
1672 # nodes will be dropped for good. However, writes happen within locks.
1673 # nodes will be dropped for good. However, writes happen within locks.
1673 # This locking makes it possible to have a race free consistent read.
1674 # This locking makes it possible to have a race free consistent read.
1674 # For this purpose, data read from disk before locking are
1675 # For this purpose, data read from disk before locking are
1675 # "invalidated" right after the locks are taken. These invalidations are
1676 # "invalidated" right after the locks are taken. These invalidations are
1676 # "light": the `filecache` mechanism keeps the data in memory and will
1677 # "light": the `filecache` mechanism keeps the data in memory and will
1677 # reuse them if the underlying files did not change. Not parsing the
1678 # reuse them if the underlying files did not change. Not parsing the
1678 # same data multiple times helps performance.
1679 # same data multiple times helps performance.
1679 #
1680 #
1680 # Unfortunately, in the case described above, the files tracked by the
1681 # Unfortunately, in the case described above, the files tracked by the
1681 # bookmarks file cache might not have changed, but the in-memory
1682 # bookmarks file cache might not have changed, but the in-memory
1682 # content is still "wrong" because we used an older changelog content
1683 # content is still "wrong" because we used an older changelog content
1683 # to process the on-disk data. So after locking, the changelog would be
1684 # to process the on-disk data. So after locking, the changelog would be
1684 # refreshed but `_bookmarks` would be preserved.
1685 # refreshed but `_bookmarks` would be preserved.
1685 # Adding `00changelog.i` to the list of tracked files is not
1686 # Adding `00changelog.i` to the list of tracked files is not
1686 # enough, because at the time we build the content for `_bookmarks` in
1687 # enough, because at the time we build the content for `_bookmarks` in
1687 # (4), the changelog file has already diverged from the content used
1688 # (4), the changelog file has already diverged from the content used
1688 # for loading `changelog` in (1)
1689 # for loading `changelog` in (1)
1689 #
1690 #
1690 # To prevent the issue, we force the changelog to be explicitly
1691 # To prevent the issue, we force the changelog to be explicitly
1691 # reloaded while computing `_bookmarks`. The data race can still happen
1692 # reloaded while computing `_bookmarks`. The data race can still happen
1692 # without the lock (with a narrower window), but it would no longer go
1693 # without the lock (with a narrower window), but it would no longer go
1693 # undetected during the lock time refresh.
1694 # undetected during the lock time refresh.
1694 #
1695 #
1695 # The new schedule is as follows:
1696 # The new schedule is as follows:
1696 #
1697 #
1697 # 1) filecache logic detect that `_bookmarks` needs to be computed
1698 # 1) filecache logic detect that `_bookmarks` needs to be computed
1698 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1699 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1699 # 3) We force `changelog` filecache to be tested
1700 # 3) We force `changelog` filecache to be tested
1700 # 4) cachestat for `changelog` are captured (for changelog)
1701 # 4) cachestat for `changelog` are captured (for changelog)
1701 # 5) `_bookmarks` is computed and cached
1702 # 5) `_bookmarks` is computed and cached
1702 #
1703 #
1703 # The step in (3) ensures we have a changelog at least as recent as the
1704 # The step in (3) ensures we have a changelog at least as recent as the
1704 # cache stat computed in (1). As a result at locking time:
1705 # cache stat computed in (1). As a result at locking time:
1705 # * if the changelog did not change since (1) -> we can reuse the data
1706 # * if the changelog did not change since (1) -> we can reuse the data
1706 # * otherwise -> the bookmarks get refreshed.
1707 # * otherwise -> the bookmarks get refreshed.
1707 self._refreshchangelog()
1708 self._refreshchangelog()
1708 return bookmarks.bmstore(self)
1709 return bookmarks.bmstore(self)
1709
1710
1710 def _refreshchangelog(self):
1711 def _refreshchangelog(self):
1711 """make sure the in memory changelog match the on-disk one"""
1712 """make sure the in memory changelog match the on-disk one"""
1712 if 'changelog' in vars(self) and self.currenttransaction() is None:
1713 if 'changelog' in vars(self) and self.currenttransaction() is None:
1713 del self.changelog
1714 del self.changelog
1714
1715
1715 @property
1716 @property
1716 def _activebookmark(self):
1717 def _activebookmark(self):
1717 return self._bookmarks.active
1718 return self._bookmarks.active
1718
1719
1719 # _phasesets depend on changelog. what we need is to call
1720 # _phasesets depend on changelog. what we need is to call
1720 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1721 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1721 # can't be easily expressed in filecache mechanism.
1722 # can't be easily expressed in filecache mechanism.
1722 @storecache(b'phaseroots', b'00changelog.i')
1723 @storecache(b'phaseroots', b'00changelog.i')
1723 def _phasecache(self):
1724 def _phasecache(self):
1724 return phases.phasecache(self, self._phasedefaults)
1725 return phases.phasecache(self, self._phasedefaults)
1725
1726
1726 @storecache(b'obsstore')
1727 @storecache(b'obsstore')
1727 def obsstore(self):
1728 def obsstore(self):
1728 return obsolete.makestore(self.ui, self)
1729 return obsolete.makestore(self.ui, self)
1729
1730
1730 @changelogcache()
1731 @changelogcache()
1731 def changelog(repo):
1732 def changelog(repo):
1732 # load dirstate before changelog to avoid a race; see issue6303
1733 # load dirstate before changelog to avoid a race; see issue6303
1733 repo.dirstate.prefetch_parents()
1734 repo.dirstate.prefetch_parents()
1734 return repo.store.changelog(
1735 return repo.store.changelog(
1735 txnutil.mayhavepending(repo.root),
1736 txnutil.mayhavepending(repo.root),
1736 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1737 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1737 )
1738 )
1738
1739
1739 @manifestlogcache()
1740 @manifestlogcache()
1740 def manifestlog(self):
1741 def manifestlog(self):
1741 return self.store.manifestlog(self, self._storenarrowmatch)
1742 return self.store.manifestlog(self, self._storenarrowmatch)
1742
1743
1743 @repofilecache(b'dirstate')
1744 @repofilecache(b'dirstate')
1744 def dirstate(self):
1745 def dirstate(self):
1745 return self._makedirstate()
1746 return self._makedirstate()
1746
1747
    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        use_dirstate_v2 = v2_req in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

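    # A quick usage sketch of ``__getitem__`` (assumed, not part of the
    # original file): ``repo[None]`` yields the working context, the quick
    # access table and the branches above resolve symbols, integers and
    # binary nodes, e.g.::
    #
    #   ctx = repo[b'.']    # working directory parent
    #   ctx = repo[b'tip']  # repository tip
    #   ctx = repo[0]       # revision number
    #   ctx = repo[node]    # 20-byte binary node
    #   text = ctx.description()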
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

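    # Hedged usage sketch (not from the original file): the %-formatting
    # mentioned above lets callers pass values without quoting them by hand,
    # e.g.::
    #
    #   for rev in repo.revs(b'ancestors(%d) and not public()', some_rev):
    #       ctx = repo[rev]
    #
    # where ``some_rev`` is an integer revision supplied by the caller.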
    def set(self, expr, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

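    # Hedged example (assumed, not in the original file): expanding user
    # aliases while overriding one of them locally::
    #
    #   revs = repo.anyrevs(
    #       [b'mine'],
    #       user=True,
    #       localalias={b'mine': b'author(alice)'},
    #   )
    #
    # ``mine`` and ``alice`` are placeholder names used for illustration.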
    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tags related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

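    # Hedged configuration sketch (not part of the original file): the filter
    # patterns loaded above come from the [encode] and [decode] hgrc sections,
    # where each entry maps a file pattern to a filter command, e.g.::
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # This mirrors the example from the hgrc documentation; the actual
    # filters depend entirely on the user's configuration.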
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

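    # Flag semantics assumed from the code above (illustrative comment only):
    # a b'l' flag writes ``data`` as the target of a symlink, while b'x'
    # toggles the executable bit on a regular file, e.g.::
    #
    #   repo.wwrite(b'script.sh', b'#!/bin/sh\n', b'x')   # executable file
    #   repo.wwrite(b'link', b'target/path', b'l')        # symlink
    #
    # The file names here are placeholders, not values from the original file.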
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            assert repo is not None  # help pytype

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                if singleheadsub.get(b"public-changes-only", False):
                    filtername = b"immutable"
                else:
                    filtername = b"visible"
                scmutil.enforcesinglehead(
                    repo, tr2, desc, accountclosed, filtername
                )
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction if
                # tr.addfilegenerator (via dirstate.write or so) isn't invoked
                # while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

2592 tr.addfinalize(b'txnclose-hook', txnclosehook)
2593 tr.addfinalize(b'txnclose-hook', txnclosehook)
2593 # Include a leading "-" to make it happen before the transaction summary
2594 # Include a leading "-" to make it happen before the transaction summary
2594 # reports registered via scmutil.registersummarycallback() whose names
2595 # reports registered via scmutil.registersummarycallback() whose names
2595 # are 00-txnreport etc. That way, the caches will be warm when the
2596 # are 00-txnreport etc. That way, the caches will be warm when the
2596 # callbacks run.
2597 # callbacks run.
2597 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2598 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2598
2599
2599 def txnaborthook(tr2):
2600 def txnaborthook(tr2):
2600 """To be run if transaction is aborted"""
2601 """To be run if transaction is aborted"""
2601 repo = reporef()
2602 repo = reporef()
2602 assert repo is not None # help pytype
2603 assert repo is not None # help pytype
2603 repo.hook(
2604 repo.hook(
2604 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2605 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2605 )
2606 )
2606
2607
2607 tr.addabort(b'txnabort-hook', txnaborthook)
2608 tr.addabort(b'txnabort-hook', txnaborthook)
2608 # avoid eager cache invalidation. in-memory data should be identical
2609 # avoid eager cache invalidation. in-memory data should be identical
2609 # to stored data if transaction has no error.
2610 # to stored data if transaction has no error.
2610 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2611 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2611 self._transref = weakref.ref(tr)
2612 self._transref = weakref.ref(tr)
2612 scmutil.registersummarycallback(self, tr, desc)
2613 scmutil.registersummarycallback(self, tr, desc)
2613 return tr
2614 return tr
2614
2615
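A hedged usage sketch (not part of localrepo.py): callers and extensions can attach their own callbacks to the transaction object built above, the same way this method registers its finalize, post-close and abort hooks; the category names and function names below are hypothetical.

def _example_transaction_callbacks(repo):
    # assumes 'repo' is a localrepository; every callback receives the transaction
    with repo.wlock(), repo.lock():
        with repo.transaction(b'example') as tr:
            tr.addfinalize(b'zz-example-finalize', lambda tr2: None)
            tr.addpostclose(b'zz-example-postclose', lambda tr2: None)
            tr.addabort(b'zz-example-abort', lambda tr2: None)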
2615 def _journalfiles(self):
2616 def _journalfiles(self):
2616 return (
2617 return (
2617 (self.svfs, b'journal'),
2618 (self.svfs, b'journal'),
2618 (self.svfs, b'journal.narrowspec'),
2619 (self.svfs, b'journal.narrowspec'),
2619 (self.vfs, b'journal.narrowspec.dirstate'),
2620 (self.vfs, b'journal.narrowspec.dirstate'),
2620 (self.vfs, b'journal.dirstate'),
2621 (self.vfs, b'journal.dirstate'),
2621 (self.vfs, b'journal.branch'),
2622 (self.vfs, b'journal.branch'),
2622 (self.vfs, b'journal.desc'),
2623 (self.vfs, b'journal.desc'),
2623 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2624 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2624 (self.svfs, b'journal.phaseroots'),
2625 (self.svfs, b'journal.phaseroots'),
2625 )
2626 )
2626
2627
2627 def undofiles(self):
2628 def undofiles(self):
2628 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2629 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2629
2630
2630 @unfilteredmethod
2631 @unfilteredmethod
2631 def _writejournal(self, desc):
2632 def _writejournal(self, desc):
2632 self.dirstate.savebackup(None, b'journal.dirstate')
2633 self.dirstate.savebackup(None, b'journal.dirstate')
2633 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2634 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2634 narrowspec.savebackup(self, b'journal.narrowspec')
2635 narrowspec.savebackup(self, b'journal.narrowspec')
2635 self.vfs.write(
2636 self.vfs.write(
2636 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2637 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2637 )
2638 )
2638 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2639 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2639 bookmarksvfs = bookmarks.bookmarksvfs(self)
2640 bookmarksvfs = bookmarks.bookmarksvfs(self)
2640 bookmarksvfs.write(
2641 bookmarksvfs.write(
2641 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2642 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2642 )
2643 )
2643 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2644 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2644
2645
2645 def recover(self):
2646 def recover(self):
2646 with self.lock():
2647 with self.lock():
2647 if self.svfs.exists(b"journal"):
2648 if self.svfs.exists(b"journal"):
2648 self.ui.status(_(b"rolling back interrupted transaction\n"))
2649 self.ui.status(_(b"rolling back interrupted transaction\n"))
2649 vfsmap = {
2650 vfsmap = {
2650 b'': self.svfs,
2651 b'': self.svfs,
2651 b'plain': self.vfs,
2652 b'plain': self.vfs,
2652 }
2653 }
2653 transaction.rollback(
2654 transaction.rollback(
2654 self.svfs,
2655 self.svfs,
2655 vfsmap,
2656 vfsmap,
2656 b"journal",
2657 b"journal",
2657 self.ui.warn,
2658 self.ui.warn,
2658 checkambigfiles=_cachedfiles,
2659 checkambigfiles=_cachedfiles,
2659 )
2660 )
2660 self.invalidate()
2661 self.invalidate()
2661 return True
2662 return True
2662 else:
2663 else:
2663 self.ui.warn(_(b"no interrupted transaction available\n"))
2664 self.ui.warn(_(b"no interrupted transaction available\n"))
2664 return False
2665 return False
2665
2666
2666 def rollback(self, dryrun=False, force=False):
2667 def rollback(self, dryrun=False, force=False):
2667 wlock = lock = dsguard = None
2668 wlock = lock = dsguard = None
2668 try:
2669 try:
2669 wlock = self.wlock()
2670 wlock = self.wlock()
2670 lock = self.lock()
2671 lock = self.lock()
2671 if self.svfs.exists(b"undo"):
2672 if self.svfs.exists(b"undo"):
2672 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2673 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2673
2674
2674 return self._rollback(dryrun, force, dsguard)
2675 return self._rollback(dryrun, force, dsguard)
2675 else:
2676 else:
2676 self.ui.warn(_(b"no rollback information available\n"))
2677 self.ui.warn(_(b"no rollback information available\n"))
2677 return 1
2678 return 1
2678 finally:
2679 finally:
2679 release(dsguard, lock, wlock)
2680 release(dsguard, lock, wlock)
2680
2681
2681 @unfilteredmethod # Until we get smarter cache management
2682 @unfilteredmethod # Until we get smarter cache management
2682 def _rollback(self, dryrun, force, dsguard):
2683 def _rollback(self, dryrun, force, dsguard):
2683 ui = self.ui
2684 ui = self.ui
2684 try:
2685 try:
2685 args = self.vfs.read(b'undo.desc').splitlines()
2686 args = self.vfs.read(b'undo.desc').splitlines()
2686 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2687 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2687 if len(args) >= 3:
2688 if len(args) >= 3:
2688 detail = args[2]
2689 detail = args[2]
2689 oldtip = oldlen - 1
2690 oldtip = oldlen - 1
2690
2691
2691 if detail and ui.verbose:
2692 if detail and ui.verbose:
2692 msg = _(
2693 msg = _(
2693 b'repository tip rolled back to revision %d'
2694 b'repository tip rolled back to revision %d'
2694 b' (undo %s: %s)\n'
2695 b' (undo %s: %s)\n'
2695 ) % (oldtip, desc, detail)
2696 ) % (oldtip, desc, detail)
2696 else:
2697 else:
2697 msg = _(
2698 msg = _(
2698 b'repository tip rolled back to revision %d (undo %s)\n'
2699 b'repository tip rolled back to revision %d (undo %s)\n'
2699 ) % (oldtip, desc)
2700 ) % (oldtip, desc)
2700 except IOError:
2701 except IOError:
2701 msg = _(b'rolling back unknown transaction\n')
2702 msg = _(b'rolling back unknown transaction\n')
2702 desc = None
2703 desc = None
2703
2704
2704 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2705 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2705 raise error.Abort(
2706 raise error.Abort(
2706 _(
2707 _(
2707 b'rollback of last commit while not checked out '
2708 b'rollback of last commit while not checked out '
2708 b'may lose data'
2709 b'may lose data'
2709 ),
2710 ),
2710 hint=_(b'use -f to force'),
2711 hint=_(b'use -f to force'),
2711 )
2712 )
2712
2713
2713 ui.status(msg)
2714 ui.status(msg)
2714 if dryrun:
2715 if dryrun:
2715 return 0
2716 return 0
2716
2717
2717 parents = self.dirstate.parents()
2718 parents = self.dirstate.parents()
2718 self.destroying()
2719 self.destroying()
2719 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2720 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2720 transaction.rollback(
2721 transaction.rollback(
2721 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2722 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2722 )
2723 )
2723 bookmarksvfs = bookmarks.bookmarksvfs(self)
2724 bookmarksvfs = bookmarks.bookmarksvfs(self)
2724 if bookmarksvfs.exists(b'undo.bookmarks'):
2725 if bookmarksvfs.exists(b'undo.bookmarks'):
2725 bookmarksvfs.rename(
2726 bookmarksvfs.rename(
2726 b'undo.bookmarks', b'bookmarks', checkambig=True
2727 b'undo.bookmarks', b'bookmarks', checkambig=True
2727 )
2728 )
2728 if self.svfs.exists(b'undo.phaseroots'):
2729 if self.svfs.exists(b'undo.phaseroots'):
2729 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2730 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2730 self.invalidate()
2731 self.invalidate()
2731
2732
2732 has_node = self.changelog.index.has_node
2733 has_node = self.changelog.index.has_node
2733 parentgone = any(not has_node(p) for p in parents)
2734 parentgone = any(not has_node(p) for p in parents)
2734 if parentgone:
2735 if parentgone:
2735 # prevent dirstateguard from overwriting already restored one
2736 # prevent dirstateguard from overwriting already restored one
2736 dsguard.close()
2737 dsguard.close()
2737
2738
2738 narrowspec.restorebackup(self, b'undo.narrowspec')
2739 narrowspec.restorebackup(self, b'undo.narrowspec')
2739 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2740 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2740 self.dirstate.restorebackup(None, b'undo.dirstate')
2741 self.dirstate.restorebackup(None, b'undo.dirstate')
2741 try:
2742 try:
2742 branch = self.vfs.read(b'undo.branch')
2743 branch = self.vfs.read(b'undo.branch')
2743 self.dirstate.setbranch(encoding.tolocal(branch))
2744 self.dirstate.setbranch(encoding.tolocal(branch))
2744 except IOError:
2745 except IOError:
2745 ui.warn(
2746 ui.warn(
2746 _(
2747 _(
2747 b'named branch could not be reset: '
2748 b'named branch could not be reset: '
2748 b'current branch is still \'%s\'\n'
2749 b'current branch is still \'%s\'\n'
2749 )
2750 )
2750 % self.dirstate.branch()
2751 % self.dirstate.branch()
2751 )
2752 )
2752
2753
2753 parents = tuple([p.rev() for p in self[None].parents()])
2754 parents = tuple([p.rev() for p in self[None].parents()])
2754 if len(parents) > 1:
2755 if len(parents) > 1:
2755 ui.status(
2756 ui.status(
2756 _(
2757 _(
2757 b'working directory now based on '
2758 b'working directory now based on '
2758 b'revisions %d and %d\n'
2759 b'revisions %d and %d\n'
2759 )
2760 )
2760 % parents
2761 % parents
2761 )
2762 )
2762 else:
2763 else:
2763 ui.status(
2764 ui.status(
2764 _(b'working directory now based on revision %d\n') % parents
2765 _(b'working directory now based on revision %d\n') % parents
2765 )
2766 )
2766 mergestatemod.mergestate.clean(self)
2767 mergestatemod.mergestate.clean(self)
2767
2768
2768 # TODO: if we know which new heads may result from this rollback, pass
2769 # TODO: if we know which new heads may result from this rollback, pass
2769 # them to destroy(), which will prevent the branchhead cache from being
2770 # them to destroy(), which will prevent the branchhead cache from being
2770 # invalidated.
2771 # invalidated.
2771 self.destroyed()
2772 self.destroyed()
2772 return 0
2773 return 0
2773
2774
2774 def _buildcacheupdater(self, newtransaction):
2775 def _buildcacheupdater(self, newtransaction):
2775 """called during transaction to build the callback updating cache
2776 """called during transaction to build the callback updating cache
2776
2777
2777 Lives on the repository to help extensions that might want to augment
2778 Lives on the repository to help extensions that might want to augment
2778 this logic. For this purpose, the created transaction is passed to the
2779 this logic. For this purpose, the created transaction is passed to the
2779 method.
2780 method.
2780 """
2781 """
2781 # we must avoid cyclic reference between repo and transaction.
2782 # we must avoid cyclic reference between repo and transaction.
2782 reporef = weakref.ref(self)
2783 reporef = weakref.ref(self)
2783
2784
2784 def updater(tr):
2785 def updater(tr):
2785 repo = reporef()
2786 repo = reporef()
2786 assert repo is not None # help pytype
2787 assert repo is not None # help pytype
2787 repo.updatecaches(tr)
2788 repo.updatecaches(tr)
2788
2789
2789 return updater
2790 return updater
2790
2791
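A hedged sketch (not part of localrepo.py) of the extension hook point the docstring above alludes to: wrapping _buildcacheupdater with extensions.wrapfunction so additional cache warming runs alongside the default updater. All _example names are hypothetical.

def _example_uisetup(ui):
    from mercurial import extensions, localrepo

    def _wrapped_buildcacheupdater(orig, repo, newtransaction):
        updater = orig(repo, newtransaction)

        def augmented(tr):
            updater(tr)
            repo.ui.debug(b'example extension: extra cache warming goes here\n')

        return augmented

    extensions.wrapfunction(
        localrepo.localrepository, '_buildcacheupdater', _wrapped_buildcacheupdater
    )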
2791 @unfilteredmethod
2792 @unfilteredmethod
2792 def updatecaches(self, tr=None, full=False, caches=None):
2793 def updatecaches(self, tr=None, full=False, caches=None):
2793 """warm appropriate caches
2794 """warm appropriate caches
2794
2795
2795 If this function is called after a transaction has closed, the transaction
2796 If this function is called after a transaction has closed, the transaction
2796 will be available in the 'tr' argument. This can be used to selectively
2797 will be available in the 'tr' argument. This can be used to selectively
2797 update caches relevant to the changes in that transaction.
2798 update caches relevant to the changes in that transaction.
2798
2799
2799 If 'full' is set, make sure all caches the function knows about have
2800 If 'full' is set, make sure all caches the function knows about have
2800 up-to-date data. Even the ones usually loaded more lazily.
2801 up-to-date data. Even the ones usually loaded more lazily.
2801
2802
2802 The `full` argument can take a special "post-clone" value. In this case
2803 The `full` argument can take a special "post-clone" value. In this case
2803 the cache warming is done after a clone, and some of the slower caches might
2804 the cache warming is done after a clone, and some of the slower caches might
2804 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2805 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2805 as we plan for a cleaner way to deal with this for 5.9.
2806 as we plan for a cleaner way to deal with this for 5.9.
2806 """
2807 """
2807 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2808 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2808 # During strip, many caches are invalid but
2809 # During strip, many caches are invalid but
2809 # later call to `destroyed` will refresh them.
2810 # later call to `destroyed` will refresh them.
2810 return
2811 return
2811
2812
2812 unfi = self.unfiltered()
2813 unfi = self.unfiltered()
2813
2814
2814 if full:
2815 if full:
2815 msg = (
2816 msg = (
2816 "`full` argument for `repo.updatecaches` is deprecated\n"
2817 "`full` argument for `repo.updatecaches` is deprecated\n"
2817 "(use `caches=repository.CACHE_ALL` instead)"
2818 "(use `caches=repository.CACHE_ALL` instead)"
2818 )
2819 )
2819 self.ui.deprecwarn(msg, b"5.9")
2820 self.ui.deprecwarn(msg, b"5.9")
2820 caches = repository.CACHES_ALL
2821 caches = repository.CACHES_ALL
2821 if full == b"post-clone":
2822 if full == b"post-clone":
2822 caches = repository.CACHES_POST_CLONE
2823 caches = repository.CACHES_POST_CLONE
2824 elif caches is None:
2825 elif caches is None:
2825 caches = repository.CACHES_DEFAULT
2826 caches = repository.CACHES_DEFAULT
2826
2827
2827 if repository.CACHE_BRANCHMAP_SERVED in caches:
2828 if repository.CACHE_BRANCHMAP_SERVED in caches:
2828 if tr is None or tr.changes[b'origrepolen'] < len(self):
2829 if tr is None or tr.changes[b'origrepolen'] < len(self):
2829 # accessing the 'served' branchmap should refresh all the others,
2830 # accessing the 'served' branchmap should refresh all the others,
2830 self.ui.debug(b'updating the branch cache\n')
2831 self.ui.debug(b'updating the branch cache\n')
2831 self.filtered(b'served').branchmap()
2832 self.filtered(b'served').branchmap()
2832 self.filtered(b'served.hidden').branchmap()
2833 self.filtered(b'served.hidden').branchmap()
2833
2834
2834 if repository.CACHE_CHANGELOG_CACHE in caches:
2835 if repository.CACHE_CHANGELOG_CACHE in caches:
2835 self.changelog.update_caches(transaction=tr)
2836 self.changelog.update_caches(transaction=tr)
2836
2837
2837 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2838 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2838 self.manifestlog.update_caches(transaction=tr)
2839 self.manifestlog.update_caches(transaction=tr)
2839
2840
2840 if repository.CACHE_REV_BRANCH in caches:
2841 if repository.CACHE_REV_BRANCH in caches:
2841 rbc = unfi.revbranchcache()
2842 rbc = unfi.revbranchcache()
2842 for r in unfi.changelog:
2843 for r in unfi.changelog:
2843 rbc.branchinfo(r)
2844 rbc.branchinfo(r)
2844 rbc.write()
2845 rbc.write()
2845
2846
2846 if repository.CACHE_FULL_MANIFEST in caches:
2847 if repository.CACHE_FULL_MANIFEST in caches:
2847 # ensure the working copy parents are in the manifestfulltextcache
2848 # ensure the working copy parents are in the manifestfulltextcache
2848 for ctx in self[b'.'].parents():
2849 for ctx in self[b'.'].parents():
2849 ctx.manifest() # accessing the manifest is enough
2850 ctx.manifest() # accessing the manifest is enough
2850
2851
2851 if repository.CACHE_FILE_NODE_TAGS in caches:
2852 if repository.CACHE_FILE_NODE_TAGS in caches:
2852 # accessing fnode cache warms the cache
2853 # accessing fnode cache warms the cache
2853 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2854 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2854
2855
2855 if repository.CACHE_TAGS_DEFAULT in caches:
2856 if repository.CACHE_TAGS_DEFAULT in caches:
2856 # accessing tags warms the cache
2857 # accessing tags warms the cache
2857 self.tags()
2858 self.tags()
2858 if repository.CACHE_TAGS_SERVED in caches:
2859 if repository.CACHE_TAGS_SERVED in caches:
2859 self.filtered(b'served').tags()
2860 self.filtered(b'served').tags()
2860
2861
2861 if repository.CACHE_BRANCHMAP_ALL in caches:
2862 if repository.CACHE_BRANCHMAP_ALL in caches:
2862 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2863 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2863 # so we're forcing a write to cause these caches to be warmed up
2864 # so we're forcing a write to cause these caches to be warmed up
2864 # even if they haven't explicitly been requested yet (if they've
2865 # even if they haven't explicitly been requested yet (if they've
2865 # never been used by hg, they won't ever have been written, even if
2866 # never been used by hg, they won't ever have been written, even if
2866 # they're a subset of another kind of cache that *has* been used).
2867 # they're a subset of another kind of cache that *has* been used).
2867 for filt in repoview.filtertable.keys():
2868 for filt in repoview.filtertable.keys():
2868 filtered = self.filtered(filt)
2869 filtered = self.filtered(filt)
2869 filtered.branchmap().write(filtered)
2870 filtered.branchmap().write(filtered)
2870
2871
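A hedged sketch (not part of localrepo.py): requesting an explicit set of caches instead of the deprecated `full` flag, using the constants this method already references.

def _example_warm_all_caches(repo):
    from mercurial.interfaces import repository

    with repo.lock():
        # roughly equivalent to the old full=True call, without the deprecation warning
        repo.updatecaches(caches=repository.CACHES_ALL)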
2871 def invalidatecaches(self):
2872 def invalidatecaches(self):
2872
2873
2873 if '_tagscache' in vars(self):
2874 if '_tagscache' in vars(self):
2874 # can't use delattr on proxy
2875 # can't use delattr on proxy
2875 del self.__dict__['_tagscache']
2876 del self.__dict__['_tagscache']
2876
2877
2877 self._branchcaches.clear()
2878 self._branchcaches.clear()
2878 self.invalidatevolatilesets()
2879 self.invalidatevolatilesets()
2879 self._sparsesignaturecache.clear()
2880 self._sparsesignaturecache.clear()
2880
2881
2881 def invalidatevolatilesets(self):
2882 def invalidatevolatilesets(self):
2882 self.filteredrevcache.clear()
2883 self.filteredrevcache.clear()
2883 obsolete.clearobscaches(self)
2884 obsolete.clearobscaches(self)
2884 self._quick_access_changeid_invalidate()
2885 self._quick_access_changeid_invalidate()
2885
2886
2886 def invalidatedirstate(self):
2887 def invalidatedirstate(self):
2887 """Invalidates the dirstate, causing the next call to dirstate
2888 """Invalidates the dirstate, causing the next call to dirstate
2888 to check if it was modified since the last time it was read,
2889 to check if it was modified since the last time it was read,
2889 rereading it if it has.
2890 rereading it if it has.
2890
2891
2891 This is different from dirstate.invalidate() in that it doesn't always
2892 This is different from dirstate.invalidate() in that it doesn't always
2892 reread the dirstate. Use dirstate.invalidate() if you want to
2893 reread the dirstate. Use dirstate.invalidate() if you want to
2893 explicitly read the dirstate again (i.e. restoring it to a previous
2894 explicitly read the dirstate again (i.e. restoring it to a previous
2894 known good state)."""
2895 known good state)."""
2895 if hasunfilteredcache(self, 'dirstate'):
2896 if hasunfilteredcache(self, 'dirstate'):
2896 for k in self.dirstate._filecache:
2897 for k in self.dirstate._filecache:
2897 try:
2898 try:
2898 delattr(self.dirstate, k)
2899 delattr(self.dirstate, k)
2899 except AttributeError:
2900 except AttributeError:
2900 pass
2901 pass
2901 delattr(self.unfiltered(), 'dirstate')
2902 delattr(self.unfiltered(), 'dirstate')
2902
2903
2903 def invalidate(self, clearfilecache=False):
2904 def invalidate(self, clearfilecache=False):
2904 """Invalidates both store and non-store parts other than dirstate
2905 """Invalidates both store and non-store parts other than dirstate
2905
2906
2906 If a transaction is running, invalidation of store is omitted,
2907 If a transaction is running, invalidation of store is omitted,
2907 because discarding in-memory changes might cause inconsistency
2908 because discarding in-memory changes might cause inconsistency
2908 (e.g. incomplete fncache causes unintentional failure, but
2909 (e.g. incomplete fncache causes unintentional failure, but
2909 redundant one doesn't).
2910 redundant one doesn't).
2910 """
2911 """
2911 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2912 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2912 for k in list(self._filecache.keys()):
2913 for k in list(self._filecache.keys()):
2913 # dirstate is invalidated separately in invalidatedirstate()
2914 # dirstate is invalidated separately in invalidatedirstate()
2914 if k == b'dirstate':
2915 if k == b'dirstate':
2915 continue
2916 continue
2916 if (
2917 if (
2917 k == b'changelog'
2918 k == b'changelog'
2918 and self.currenttransaction()
2919 and self.currenttransaction()
2919 and self.changelog._delayed
2920 and self.changelog._delayed
2920 ):
2921 ):
2921 # The changelog object may store unwritten revisions. We don't
2922 # The changelog object may store unwritten revisions. We don't
2922 # want to lose them.
2923 # want to lose them.
2923 # TODO: Solve the problem instead of working around it.
2924 # TODO: Solve the problem instead of working around it.
2924 continue
2925 continue
2925
2926
2926 if clearfilecache:
2927 if clearfilecache:
2927 del self._filecache[k]
2928 del self._filecache[k]
2928 try:
2929 try:
2929 delattr(unfiltered, k)
2930 delattr(unfiltered, k)
2930 except AttributeError:
2931 except AttributeError:
2931 pass
2932 pass
2932 self.invalidatecaches()
2933 self.invalidatecaches()
2933 if not self.currenttransaction():
2934 if not self.currenttransaction():
2934 # TODO: Changing contents of store outside transaction
2935 # TODO: Changing contents of store outside transaction
2935 # causes inconsistency. We should make in-memory store
2936 # causes inconsistency. We should make in-memory store
2936 # changes detectable, and abort if changed.
2937 # changes detectable, and abort if changed.
2937 self.store.invalidatecaches()
2938 self.store.invalidatecaches()
2938
2939
2939 def invalidateall(self):
2940 def invalidateall(self):
2940 """Fully invalidates both store and non-store parts, causing the
2941 """Fully invalidates both store and non-store parts, causing the
2941 subsequent operation to reread any outside changes."""
2942 subsequent operation to reread any outside changes."""
2942 # extensions should hook this to invalidate their caches
2943 # extensions should hook this to invalidate their caches
2943 self.invalidate()
2944 self.invalidate()
2944 self.invalidatedirstate()
2945 self.invalidatedirstate()
2945
2946
2946 @unfilteredmethod
2947 @unfilteredmethod
2947 def _refreshfilecachestats(self, tr):
2948 def _refreshfilecachestats(self, tr):
2948 """Reload stats of cached files so that they are flagged as valid"""
2949 """Reload stats of cached files so that they are flagged as valid"""
2949 for k, ce in self._filecache.items():
2950 for k, ce in self._filecache.items():
2950 k = pycompat.sysstr(k)
2951 k = pycompat.sysstr(k)
2951 if k == 'dirstate' or k not in self.__dict__:
2952 if k == 'dirstate' or k not in self.__dict__:
2952 continue
2953 continue
2953 ce.refresh()
2954 ce.refresh()
2954
2955
2955 def _lock(
2956 def _lock(
2956 self,
2957 self,
2957 vfs,
2958 vfs,
2958 lockname,
2959 lockname,
2959 wait,
2960 wait,
2960 releasefn,
2961 releasefn,
2961 acquirefn,
2962 acquirefn,
2962 desc,
2963 desc,
2963 ):
2964 ):
2964 timeout = 0
2965 timeout = 0
2965 warntimeout = 0
2966 warntimeout = 0
2966 if wait:
2967 if wait:
2967 timeout = self.ui.configint(b"ui", b"timeout")
2968 timeout = self.ui.configint(b"ui", b"timeout")
2968 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2969 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2969 # internal config: ui.signal-safe-lock
2970 # internal config: ui.signal-safe-lock
2970 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2971 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2971
2972
2972 l = lockmod.trylock(
2973 l = lockmod.trylock(
2973 self.ui,
2974 self.ui,
2974 vfs,
2975 vfs,
2975 lockname,
2976 lockname,
2976 timeout,
2977 timeout,
2977 warntimeout,
2978 warntimeout,
2978 releasefn=releasefn,
2979 releasefn=releasefn,
2979 acquirefn=acquirefn,
2980 acquirefn=acquirefn,
2980 desc=desc,
2981 desc=desc,
2981 signalsafe=signalsafe,
2982 signalsafe=signalsafe,
2982 )
2983 )
2983 return l
2984 return l
2984
2985
2985 def _afterlock(self, callback):
2986 def _afterlock(self, callback):
2986 """add a callback to be run when the repository is fully unlocked
2987 """add a callback to be run when the repository is fully unlocked
2987
2988
2988 The callback will be executed when the outermost lock is released
2989 The callback will be executed when the outermost lock is released
2989 (with wlock being higher level than 'lock')."""
2990 (with wlock being higher level than 'lock')."""
2990 for ref in (self._wlockref, self._lockref):
2991 for ref in (self._wlockref, self._lockref):
2991 l = ref and ref()
2992 l = ref and ref()
2992 if l and l.held:
2993 if l and l.held:
2993 l.postrelease.append(callback)
2994 l.postrelease.append(callback)
2994 break
2995 break
2995 else: # no lock has been found.
2996 else: # no lock has been found.
2996 callback(True)
2997 callback(True)
2997
2998
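A hedged sketch (not part of localrepo.py): queueing work to run once the outermost lock is released, the same mechanism the commit path uses for its txnclose and commit hooks above. The callback receives a single success flag.

def _example_deferred_notification(repo):
    def notify(unused_success):
        repo.ui.status(b'all locks released, running deferred work\n')

    with repo.wlock(), repo.lock():
        repo._afterlock(notify)  # runs when the outer lock is released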
2998 def lock(self, wait=True):
2999 def lock(self, wait=True):
2999 """Lock the repository store (.hg/store) and return a weak reference
3000 """Lock the repository store (.hg/store) and return a weak reference
3000 to the lock. Use this before modifying the store (e.g. committing or
3001 to the lock. Use this before modifying the store (e.g. committing or
3001 stripping). If you are opening a transaction, get a lock as well.
3002 stripping). If you are opening a transaction, get a lock as well.
3002
3003
3003 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3004 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3004 'wlock' first to avoid a dead-lock hazard."""
3005 'wlock' first to avoid a dead-lock hazard."""
3005 l = self._currentlock(self._lockref)
3006 l = self._currentlock(self._lockref)
3006 if l is not None:
3007 if l is not None:
3007 l.lock()
3008 l.lock()
3008 return l
3009 return l
3009
3010
3010 l = self._lock(
3011 l = self._lock(
3011 vfs=self.svfs,
3012 vfs=self.svfs,
3012 lockname=b"lock",
3013 lockname=b"lock",
3013 wait=wait,
3014 wait=wait,
3014 releasefn=None,
3015 releasefn=None,
3015 acquirefn=self.invalidate,
3016 acquirefn=self.invalidate,
3016 desc=_(b'repository %s') % self.origroot,
3017 desc=_(b'repository %s') % self.origroot,
3017 )
3018 )
3018 self._lockref = weakref.ref(l)
3019 self._lockref = weakref.ref(l)
3019 return l
3020 return l
3020
3021
3021 def wlock(self, wait=True):
3022 def wlock(self, wait=True):
3022 """Lock the non-store parts of the repository (everything under
3023 """Lock the non-store parts of the repository (everything under
3023 .hg except .hg/store) and return a weak reference to the lock.
3024 .hg except .hg/store) and return a weak reference to the lock.
3024
3025
3025 Use this before modifying files in .hg.
3026 Use this before modifying files in .hg.
3026
3027
3027 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3028 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3028 'wlock' first to avoid a dead-lock hazard."""
3029 'wlock' first to avoid a dead-lock hazard."""
3029 l = self._wlockref() if self._wlockref else None
3030 l = self._wlockref() if self._wlockref else None
3030 if l is not None and l.held:
3031 if l is not None and l.held:
3031 l.lock()
3032 l.lock()
3032 return l
3033 return l
3033
3034
3034 # We do not need to check for non-waiting lock acquisition. Such
3035 # We do not need to check for non-waiting lock acquisition. Such
3035 # acquisition would not cause dead-lock as they would just fail.
3036 # acquisition would not cause dead-lock as they would just fail.
3036 if wait and (
3037 if wait and (
3037 self.ui.configbool(b'devel', b'all-warnings')
3038 self.ui.configbool(b'devel', b'all-warnings')
3038 or self.ui.configbool(b'devel', b'check-locks')
3039 or self.ui.configbool(b'devel', b'check-locks')
3039 ):
3040 ):
3040 if self._currentlock(self._lockref) is not None:
3041 if self._currentlock(self._lockref) is not None:
3041 self.ui.develwarn(b'"wlock" acquired after "lock"')
3042 self.ui.develwarn(b'"wlock" acquired after "lock"')
3042
3043
3043 def unlock():
3044 def unlock():
3044 if self.dirstate.pendingparentchange():
3045 if self.dirstate.pendingparentchange():
3045 self.dirstate.invalidate()
3046 self.dirstate.invalidate()
3046 else:
3047 else:
3047 self.dirstate.write(None)
3048 self.dirstate.write(None)
3048
3049
3049 self._filecache[b'dirstate'].refresh()
3050 self._filecache[b'dirstate'].refresh()
3050
3051
3051 l = self._lock(
3052 l = self._lock(
3052 self.vfs,
3053 self.vfs,
3053 b"wlock",
3054 b"wlock",
3054 wait,
3055 wait,
3055 unlock,
3056 unlock,
3056 self.invalidatedirstate,
3057 self.invalidatedirstate,
3057 _(b'working directory of %s') % self.origroot,
3058 _(b'working directory of %s') % self.origroot,
3058 )
3059 )
3059 self._wlockref = weakref.ref(l)
3060 self._wlockref = weakref.ref(l)
3060 return l
3061 return l
3061
3062
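A hedged sketch (not part of localrepo.py): non-blocking acquisition of both locks in the order the docstrings above require ('wlock' before 'lock'), assuming error.LockHeld is the failure mode when another process holds a lock.

def _example_try_nonblocking_locks(repo):
    from mercurial import error

    try:
        with repo.wlock(wait=False), repo.lock(wait=False):
            pass  # a quick, non-blocking critical section
    except error.LockHeld:
        repo.ui.warn(b'repository is locked by another process\n')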
3062 def _currentlock(self, lockref):
3063 def _currentlock(self, lockref):
3063 """Returns the lock if it's held, or None if it's not."""
3064 """Returns the lock if it's held, or None if it's not."""
3064 if lockref is None:
3065 if lockref is None:
3065 return None
3066 return None
3066 l = lockref()
3067 l = lockref()
3067 if l is None or not l.held:
3068 if l is None or not l.held:
3068 return None
3069 return None
3069 return l
3070 return l
3070
3071
3071 def currentwlock(self):
3072 def currentwlock(self):
3072 """Returns the wlock if it's held, or None if it's not."""
3073 """Returns the wlock if it's held, or None if it's not."""
3073 return self._currentlock(self._wlockref)
3074 return self._currentlock(self._wlockref)
3074
3075
3075 def checkcommitpatterns(self, wctx, match, status, fail):
3076 def checkcommitpatterns(self, wctx, match, status, fail):
3076 """check for commit arguments that aren't committable"""
3077 """check for commit arguments that aren't committable"""
3077 if match.isexact() or match.prefix():
3078 if match.isexact() or match.prefix():
3078 matched = set(status.modified + status.added + status.removed)
3079 matched = set(status.modified + status.added + status.removed)
3079
3080
3080 for f in match.files():
3081 for f in match.files():
3081 f = self.dirstate.normalize(f)
3082 f = self.dirstate.normalize(f)
3082 if f == b'.' or f in matched or f in wctx.substate:
3083 if f == b'.' or f in matched or f in wctx.substate:
3083 continue
3084 continue
3084 if f in status.deleted:
3085 if f in status.deleted:
3085 fail(f, _(b'file not found!'))
3086 fail(f, _(b'file not found!'))
3086 # Is it a directory that exists or used to exist?
3087 # Is it a directory that exists or used to exist?
3087 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3088 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3088 d = f + b'/'
3089 d = f + b'/'
3089 for mf in matched:
3090 for mf in matched:
3090 if mf.startswith(d):
3091 if mf.startswith(d):
3091 break
3092 break
3092 else:
3093 else:
3093 fail(f, _(b"no match under directory!"))
3094 fail(f, _(b"no match under directory!"))
3094 elif f not in self.dirstate:
3095 elif f not in self.dirstate:
3095 fail(f, _(b"file not tracked!"))
3096 fail(f, _(b"file not tracked!"))
3096
3097
3097 @unfilteredmethod
3098 @unfilteredmethod
3098 def commit(
3099 def commit(
3099 self,
3100 self,
3100 text=b"",
3101 text=b"",
3101 user=None,
3102 user=None,
3102 date=None,
3103 date=None,
3103 match=None,
3104 match=None,
3104 force=False,
3105 force=False,
3105 editor=None,
3106 editor=None,
3106 extra=None,
3107 extra=None,
3107 ):
3108 ):
3108 """Add a new revision to current repository.
3109 """Add a new revision to current repository.
3109
3110
3110 Revision information is gathered from the working directory,
3111 Revision information is gathered from the working directory,
3111 match can be used to filter the committed files. If editor is
3112 match can be used to filter the committed files. If editor is
3112 supplied, it is called to get a commit message.
3113 supplied, it is called to get a commit message.
3113 """
3114 """
3114 if extra is None:
3115 if extra is None:
3115 extra = {}
3116 extra = {}
3116
3117
3117 def fail(f, msg):
3118 def fail(f, msg):
3118 raise error.InputError(b'%s: %s' % (f, msg))
3119 raise error.InputError(b'%s: %s' % (f, msg))
3119
3120
3120 if not match:
3121 if not match:
3121 match = matchmod.always()
3122 match = matchmod.always()
3122
3123
3123 if not force:
3124 if not force:
3124 match.bad = fail
3125 match.bad = fail
3125
3126
3126 # lock() for recent changelog (see issue4368)
3127 # lock() for recent changelog (see issue4368)
3127 with self.wlock(), self.lock():
3128 with self.wlock(), self.lock():
3128 wctx = self[None]
3129 wctx = self[None]
3129 merge = len(wctx.parents()) > 1
3130 merge = len(wctx.parents()) > 1
3130
3131
3131 if not force and merge and not match.always():
3132 if not force and merge and not match.always():
3132 raise error.Abort(
3133 raise error.Abort(
3133 _(
3134 _(
3134 b'cannot partially commit a merge '
3135 b'cannot partially commit a merge '
3135 b'(do not specify files or patterns)'
3136 b'(do not specify files or patterns)'
3136 )
3137 )
3137 )
3138 )
3138
3139
3139 status = self.status(match=match, clean=force)
3140 status = self.status(match=match, clean=force)
3140 if force:
3141 if force:
3141 status.modified.extend(
3142 status.modified.extend(
3142 status.clean
3143 status.clean
3143 ) # mq may commit clean files
3144 ) # mq may commit clean files
3144
3145
3145 # check subrepos
3146 # check subrepos
3146 subs, commitsubs, newstate = subrepoutil.precommit(
3147 subs, commitsubs, newstate = subrepoutil.precommit(
3147 self.ui, wctx, status, match, force=force
3148 self.ui, wctx, status, match, force=force
3148 )
3149 )
3149
3150
3150 # make sure all explicit patterns are matched
3151 # make sure all explicit patterns are matched
3151 if not force:
3152 if not force:
3152 self.checkcommitpatterns(wctx, match, status, fail)
3153 self.checkcommitpatterns(wctx, match, status, fail)
3153
3154
3154 cctx = context.workingcommitctx(
3155 cctx = context.workingcommitctx(
3155 self, status, text, user, date, extra
3156 self, status, text, user, date, extra
3156 )
3157 )
3157
3158
3158 ms = mergestatemod.mergestate.read(self)
3159 ms = mergestatemod.mergestate.read(self)
3159 mergeutil.checkunresolved(ms)
3160 mergeutil.checkunresolved(ms)
3160
3161
3161 # internal config: ui.allowemptycommit
3162 # internal config: ui.allowemptycommit
3162 if cctx.isempty() and not self.ui.configbool(
3163 if cctx.isempty() and not self.ui.configbool(
3163 b'ui', b'allowemptycommit'
3164 b'ui', b'allowemptycommit'
3164 ):
3165 ):
3165 self.ui.debug(b'nothing to commit, clearing merge state\n')
3166 self.ui.debug(b'nothing to commit, clearing merge state\n')
3166 ms.reset()
3167 ms.reset()
3167 return None
3168 return None
3168
3169
3169 if merge and cctx.deleted():
3170 if merge and cctx.deleted():
3170 raise error.Abort(_(b"cannot commit merge with missing files"))
3171 raise error.Abort(_(b"cannot commit merge with missing files"))
3171
3172
3172 if editor:
3173 if editor:
3173 cctx._text = editor(self, cctx, subs)
3174 cctx._text = editor(self, cctx, subs)
3174 edited = text != cctx._text
3175 edited = text != cctx._text
3175
3176
3176 # Save commit message in case this transaction gets rolled back
3177 # Save commit message in case this transaction gets rolled back
3177 # (e.g. by a pretxncommit hook). Leave the content alone on
3178 # (e.g. by a pretxncommit hook). Leave the content alone on
3178 # the assumption that the user will use the same editor again.
3179 # the assumption that the user will use the same editor again.
3179 msgfn = self.savecommitmessage(cctx._text)
3180 msgfn = self.savecommitmessage(cctx._text)
3180
3181
3181 # commit subs and write new state
3182 # commit subs and write new state
3182 if subs:
3183 if subs:
3183 uipathfn = scmutil.getuipathfn(self)
3184 uipathfn = scmutil.getuipathfn(self)
3184 for s in sorted(commitsubs):
3185 for s in sorted(commitsubs):
3185 sub = wctx.sub(s)
3186 sub = wctx.sub(s)
3186 self.ui.status(
3187 self.ui.status(
3187 _(b'committing subrepository %s\n')
3188 _(b'committing subrepository %s\n')
3188 % uipathfn(subrepoutil.subrelpath(sub))
3189 % uipathfn(subrepoutil.subrelpath(sub))
3189 )
3190 )
3190 sr = sub.commit(cctx._text, user, date)
3191 sr = sub.commit(cctx._text, user, date)
3191 newstate[s] = (newstate[s][0], sr)
3192 newstate[s] = (newstate[s][0], sr)
3192 subrepoutil.writestate(self, newstate)
3193 subrepoutil.writestate(self, newstate)
3193
3194
3194 p1, p2 = self.dirstate.parents()
3195 p1, p2 = self.dirstate.parents()
3195 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3196 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3196 try:
3197 try:
3197 self.hook(
3198 self.hook(
3198 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3199 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3199 )
3200 )
3200 with self.transaction(b'commit'):
3201 with self.transaction(b'commit'):
3201 ret = self.commitctx(cctx, True)
3202 ret = self.commitctx(cctx, True)
3202 # update bookmarks, dirstate and mergestate
3203 # update bookmarks, dirstate and mergestate
3203 bookmarks.update(self, [p1, p2], ret)
3204 bookmarks.update(self, [p1, p2], ret)
3204 cctx.markcommitted(ret)
3205 cctx.markcommitted(ret)
3205 ms.reset()
3206 ms.reset()
3206 except: # re-raises
3207 except: # re-raises
3207 if edited:
3208 if edited:
3208 self.ui.write(
3209 self.ui.write(
3209 _(b'note: commit message saved in %s\n') % msgfn
3210 _(b'note: commit message saved in %s\n') % msgfn
3210 )
3211 )
3211 self.ui.write(
3212 self.ui.write(
3212 _(
3213 _(
3213 b"note: use 'hg commit --logfile "
3214 b"note: use 'hg commit --logfile "
3214 b".hg/last-message.txt --edit' to reuse it\n"
3215 b".hg/last-message.txt --edit' to reuse it\n"
3215 )
3216 )
3216 )
3217 )
3217 raise
3218 raise
3218
3219
3219 def commithook(unused_success):
3220 def commithook(unused_success):
3220 # hack for commands that use a temporary commit (eg: histedit)
3221 # hack for commands that use a temporary commit (eg: histedit)
3221 # the temporary commit may have been stripped before the hook runs
3222 # the temporary commit may have been stripped before the hook runs
3222 if self.changelog.hasnode(ret):
3223 if self.changelog.hasnode(ret):
3223 self.hook(
3224 self.hook(
3224 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3225 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3225 )
3226 )
3226
3227
3227 self._afterlock(commithook)
3228 self._afterlock(commithook)
3228 return ret
3229 return ret
3229
3230
3230 @unfilteredmethod
3231 @unfilteredmethod
3231 def commitctx(self, ctx, error=False, origctx=None):
3232 def commitctx(self, ctx, error=False, origctx=None):
3232 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3233 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3233
3234
3234 @unfilteredmethod
3235 @unfilteredmethod
3235 def destroying(self):
3236 def destroying(self):
3236 """Inform the repository that nodes are about to be destroyed.
3237 """Inform the repository that nodes are about to be destroyed.
3237 Intended for use by strip and rollback, so there's a common
3238 Intended for use by strip and rollback, so there's a common
3238 place for anything that has to be done before destroying history.
3239 place for anything that has to be done before destroying history.
3239
3240
3240 This is mostly useful for saving state that is in memory and waiting
3241 This is mostly useful for saving state that is in memory and waiting
3241 to be flushed when the current lock is released. Because a call to
3242 to be flushed when the current lock is released. Because a call to
3242 destroyed is imminent, the repo will be invalidated causing those
3243 destroyed is imminent, the repo will be invalidated causing those
3243 changes to stay in memory (waiting for the next unlock), or vanish
3244 changes to stay in memory (waiting for the next unlock), or vanish
3244 completely.
3245 completely.
3245 """
3246 """
3246 # When using the same lock to commit and strip, the phasecache is left
3247 # When using the same lock to commit and strip, the phasecache is left
3247 # dirty after committing. Then when we strip, the repo is invalidated,
3248 # dirty after committing. Then when we strip, the repo is invalidated,
3248 # causing those changes to disappear.
3249 # causing those changes to disappear.
3249 if '_phasecache' in vars(self):
3250 if '_phasecache' in vars(self):
3250 self._phasecache.write()
3251 self._phasecache.write()
3251
3252
3252 @unfilteredmethod
3253 @unfilteredmethod
3253 def destroyed(self):
3254 def destroyed(self):
3254 """Inform the repository that nodes have been destroyed.
3255 """Inform the repository that nodes have been destroyed.
3255 Intended for use by strip and rollback, so there's a common
3256 Intended for use by strip and rollback, so there's a common
3256 place for anything that has to be done after destroying history.
3257 place for anything that has to be done after destroying history.
3257 """
3258 """
3258 # When one tries to:
3259 # When one tries to:
3259 # 1) destroy nodes thus calling this method (e.g. strip)
3260 # 1) destroy nodes thus calling this method (e.g. strip)
3260 # 2) use phasecache somewhere (e.g. commit)
3261 # 2) use phasecache somewhere (e.g. commit)
3261 #
3262 #
3262 # then 2) will fail because the phasecache contains nodes that were
3263 # then 2) will fail because the phasecache contains nodes that were
3263 # removed. We can either remove phasecache from the filecache,
3264 # removed. We can either remove phasecache from the filecache,
3264 # causing it to reload next time it is accessed, or simply filter
3265 # causing it to reload next time it is accessed, or simply filter
3265 # the removed nodes now and write the updated cache.
3266 # the removed nodes now and write the updated cache.
3266 self._phasecache.filterunknown(self)
3267 self._phasecache.filterunknown(self)
3267 self._phasecache.write()
3268 self._phasecache.write()
3268
3269
3269 # refresh all repository caches
3270 # refresh all repository caches
3270 self.updatecaches()
3271 self.updatecaches()
3271
3272
3272 # Ensure the persistent tag cache is updated. Doing it now
3273 # Ensure the persistent tag cache is updated. Doing it now
3273 # means that the tag cache only has to worry about destroyed
3274 # means that the tag cache only has to worry about destroyed
3274 # heads immediately after a strip/rollback. That in turn
3275 # heads immediately after a strip/rollback. That in turn
3275 # guarantees that "cachetip == currenttip" (comparing both rev
3276 # guarantees that "cachetip == currenttip" (comparing both rev
3276 # and node) always means no nodes have been added or destroyed.
3277 # and node) always means no nodes have been added or destroyed.
3277
3278
3278 # XXX this is suboptimal when qrefresh'ing: we strip the current
3279 # XXX this is suboptimal when qrefresh'ing: we strip the current
3279 # head, refresh the tag cache, then immediately add a new head.
3280 # head, refresh the tag cache, then immediately add a new head.
3280 # But I think doing it this way is necessary for the "instant
3281 # But I think doing it this way is necessary for the "instant
3281 # tag cache retrieval" case to work.
3282 # tag cache retrieval" case to work.
3282 self.invalidate()
3283 self.invalidate()
3283
3284
3284 def status(
3285 def status(
3285 self,
3286 self,
3286 node1=b'.',
3287 node1=b'.',
3287 node2=None,
3288 node2=None,
3288 match=None,
3289 match=None,
3289 ignored=False,
3290 ignored=False,
3290 clean=False,
3291 clean=False,
3291 unknown=False,
3292 unknown=False,
3292 listsubrepos=False,
3293 listsubrepos=False,
3293 ):
3294 ):
3294 '''a convenience method that calls node1.status(node2)'''
3295 '''a convenience method that calls node1.status(node2)'''
3295 return self[node1].status(
3296 return self[node1].status(
3296 node2, match, ignored, clean, unknown, listsubrepos
3297 node2, match, ignored, clean, unknown, listsubrepos
3297 )
3298 )
3298
3299
3299 def addpostdsstatus(self, ps):
3300 def addpostdsstatus(self, ps):
3300 """Add a callback to run within the wlock, at the point at which status
3301 """Add a callback to run within the wlock, at the point at which status
3301 fixups happen.
3302 fixups happen.
3302
3303
3303 On status completion, callback(wctx, status) will be called with the
3304 On status completion, callback(wctx, status) will be called with the
3304 wlock held, unless the dirstate has changed from underneath or the wlock
3305 wlock held, unless the dirstate has changed from underneath or the wlock
3305 couldn't be grabbed.
3306 couldn't be grabbed.
3306
3307
3307 Callbacks should not capture and use a cached copy of the dirstate --
3308 Callbacks should not capture and use a cached copy of the dirstate --
3308 it might change in the meanwhile. Instead, they should access the
3309 it might change in the meanwhile. Instead, they should access the
3309 dirstate via wctx.repo().dirstate.
3310 dirstate via wctx.repo().dirstate.
3310
3311
3311 This list is emptied out after each status run -- extensions should
3312 This list is emptied out after each status run -- extensions should
3312 make sure to add to this list each time dirstate.status is called.
3313 make sure to add to this list each time dirstate.status is called.
3313 Extensions should also make sure they don't call this for statuses
3314 Extensions should also make sure they don't call this for statuses
3314 that don't involve the dirstate.
3315 that don't involve the dirstate.
3315 """
3316 """
3316
3317
3317 # The list is located here for uniqueness reasons -- it is actually
3318 # The list is located here for uniqueness reasons -- it is actually
3318 # managed by the workingctx, but that isn't unique per-repo.
3319 # managed by the workingctx, but that isn't unique per-repo.
3319 self._postdsstatus.append(ps)
3320 self._postdsstatus.append(ps)
3320
3321
3321 def postdsstatus(self):
3322 def postdsstatus(self):
3322 """Used by workingctx to get the list of post-dirstate-status hooks."""
3323 """Used by workingctx to get the list of post-dirstate-status hooks."""
3323 return self._postdsstatus
3324 return self._postdsstatus
3324
3325
3325 def clearpostdsstatus(self):
3326 def clearpostdsstatus(self):
3326 """Used by workingctx to clear post-dirstate-status hooks."""
3327 """Used by workingctx to clear post-dirstate-status hooks."""
3327 del self._postdsstatus[:]
3328 del self._postdsstatus[:]
3328
3329
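A hedged sketch (not part of localrepo.py): registering a post-dirstate-status fixup as described above; the callback reaches the dirstate through wctx.repo() rather than a cached copy.

def _example_register_status_fixup(repo):
    def fixup(wctx, status):
        wctx.repo().ui.debug(
            b'status fixups saw %d modified files\n' % len(status.modified)
        )

    repo.addpostdsstatus(fixup)  # emptied out again after the next status run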
3329 def heads(self, start=None):
3330 def heads(self, start=None):
3330 if start is None:
3331 if start is None:
3331 cl = self.changelog
3332 cl = self.changelog
3332 headrevs = reversed(cl.headrevs())
3333 headrevs = reversed(cl.headrevs())
3333 return [cl.node(rev) for rev in headrevs]
3334 return [cl.node(rev) for rev in headrevs]
3334
3335
3335 heads = self.changelog.heads(start)
3336 heads = self.changelog.heads(start)
3336 # sort the output in rev descending order
3337 # sort the output in rev descending order
3337 return sorted(heads, key=self.changelog.rev, reverse=True)
3338 return sorted(heads, key=self.changelog.rev, reverse=True)
3338
3339
3339 def branchheads(self, branch=None, start=None, closed=False):
3340 def branchheads(self, branch=None, start=None, closed=False):
3340 """return a (possibly filtered) list of heads for the given branch
3341 """return a (possibly filtered) list of heads for the given branch
3341
3342
3342 Heads are returned in topological order, from newest to oldest.
3343 Heads are returned in topological order, from newest to oldest.
3343 If branch is None, use the dirstate branch.
3344 If branch is None, use the dirstate branch.
3344 If start is not None, return only heads reachable from start.
3345 If start is not None, return only heads reachable from start.
3345 If closed is True, return heads that are marked as closed as well.
3346 If closed is True, return heads that are marked as closed as well.
3346 """
3347 """
3347 if branch is None:
3348 if branch is None:
3348 branch = self[None].branch()
3349 branch = self[None].branch()
3349 branches = self.branchmap()
3350 branches = self.branchmap()
3350 if not branches.hasbranch(branch):
3351 if not branches.hasbranch(branch):
3351 return []
3352 return []
3352 # the cache returns heads ordered lowest to highest
3353 # the cache returns heads ordered lowest to highest
3353 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3354 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3354 if start is not None:
3355 if start is not None:
3355 # filter out the heads that cannot be reached from startrev
3356 # filter out the heads that cannot be reached from startrev
3356 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3357 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3357 bheads = [h for h in bheads if h in fbheads]
3358 bheads = [h for h in bheads if h in fbheads]
3358 return bheads
3359 return bheads
3359
3360
3360 def branches(self, nodes):
3361 def branches(self, nodes):
3361 if not nodes:
3362 if not nodes:
3362 nodes = [self.changelog.tip()]
3363 nodes = [self.changelog.tip()]
3363 b = []
3364 b = []
3364 for n in nodes:
3365 for n in nodes:
3365 t = n
3366 t = n
3366 while True:
3367 while True:
3367 p = self.changelog.parents(n)
3368 p = self.changelog.parents(n)
3368 if p[1] != self.nullid or p[0] == self.nullid:
3369 if p[1] != self.nullid or p[0] == self.nullid:
3369 b.append((t, n, p[0], p[1]))
3370 b.append((t, n, p[0], p[1]))
3370 break
3371 break
3371 n = p[0]
3372 n = p[0]
3372 return b
3373 return b
3373
3374
3374 def between(self, pairs):
3375 def between(self, pairs):
3375 r = []
3376 r = []
3376
3377
3377 for top, bottom in pairs:
3378 for top, bottom in pairs:
3378 n, l, i = top, [], 0
3379 n, l, i = top, [], 0
3379 f = 1
3380 f = 1
3380
3381
3381 while n != bottom and n != self.nullid:
3382 while n != bottom and n != self.nullid:
3382 p = self.changelog.parents(n)[0]
3383 p = self.changelog.parents(n)[0]
3383 if i == f:
3384 if i == f:
3384 l.append(n)
3385 l.append(n)
3385 f = f * 2
3386 f = f * 2
3386 n = p
3387 n = p
3387 i += 1
3388 i += 1
3388
3389
3389 r.append(l)
3390 r.append(l)
3390
3391
3391 return r
3392 return r
3392
3393
3393 def checkpush(self, pushop):
3394 def checkpush(self, pushop):
3394 """Extensions can override this function if additional checks have
3395 """Extensions can override this function if additional checks have
3395 to be performed before pushing, or call it if they override push
3396 to be performed before pushing, or call it if they override push
3396 command.
3397 command.
3397 """
3398 """
3398
3399
3399 @unfilteredpropertycache
3400 @unfilteredpropertycache
3400 def prepushoutgoinghooks(self):
3401 def prepushoutgoinghooks(self):
3401 """Return util.hooks consists of a pushop with repo, remote, outgoing
3402 """Return util.hooks consists of a pushop with repo, remote, outgoing
3402 methods, which are called before pushing changesets.
3403 methods, which are called before pushing changesets.
3403 """
3404 """
3404 return util.hooks()
3405 return util.hooks()
3405
3406
3406 def pushkey(self, namespace, key, old, new):
3407 def pushkey(self, namespace, key, old, new):
3407 try:
3408 try:
3408 tr = self.currenttransaction()
3409 tr = self.currenttransaction()
3409 hookargs = {}
3410 hookargs = {}
3410 if tr is not None:
3411 if tr is not None:
3411 hookargs.update(tr.hookargs)
3412 hookargs.update(tr.hookargs)
3412 hookargs = pycompat.strkwargs(hookargs)
3413 hookargs = pycompat.strkwargs(hookargs)
3413 hookargs['namespace'] = namespace
3414 hookargs['namespace'] = namespace
3414 hookargs['key'] = key
3415 hookargs['key'] = key
3415 hookargs['old'] = old
3416 hookargs['old'] = old
3416 hookargs['new'] = new
3417 hookargs['new'] = new
3417 self.hook(b'prepushkey', throw=True, **hookargs)
3418 self.hook(b'prepushkey', throw=True, **hookargs)
3418 except error.HookAbort as exc:
3419 except error.HookAbort as exc:
3419 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3420 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3420 if exc.hint:
3421 if exc.hint:
3421 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3422 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3422 return False
3423 return False
3423 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3424 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3424 ret = pushkey.push(self, namespace, key, old, new)
3425 ret = pushkey.push(self, namespace, key, old, new)
3425
3426
3426 def runhook(unused_success):
3427 def runhook(unused_success):
3427 self.hook(
3428 self.hook(
3428 b'pushkey',
3429 b'pushkey',
3429 namespace=namespace,
3430 namespace=namespace,
3430 key=key,
3431 key=key,
3431 old=old,
3432 old=old,
3432 new=new,
3433 new=new,
3433 ret=ret,
3434 ret=ret,
3434 )
3435 )
3435
3436
3436 self._afterlock(runhook)
3437 self._afterlock(runhook)
3437 return ret
3438 return ret
3438
3439
3439 def listkeys(self, namespace):
3440 def listkeys(self, namespace):
3440 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3441 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3441 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3442 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3442 values = pushkey.list(self, namespace)
3443 values = pushkey.list(self, namespace)
3443 self.hook(b'listkeys', namespace=namespace, values=values)
3444 self.hook(b'listkeys', namespace=namespace, values=values)
3444 return values
3445 return values
3445
3446
3446 def debugwireargs(self, one, two, three=None, four=None, five=None):
3447 def debugwireargs(self, one, two, three=None, four=None, five=None):
3447 '''used to test argument passing over the wire'''
3448 '''used to test argument passing over the wire'''
3448 return b"%s %s %s %s %s" % (
3449 return b"%s %s %s %s %s" % (
3449 one,
3450 one,
3450 two,
3451 two,
3451 pycompat.bytestr(three),
3452 pycompat.bytestr(three),
3452 pycompat.bytestr(four),
3453 pycompat.bytestr(four),
3453 pycompat.bytestr(five),
3454 pycompat.bytestr(five),
3454 )
3455 )
3455
3456
3456 def savecommitmessage(self, text):
3457 def savecommitmessage(self, text):
3457 fp = self.vfs(b'last-message.txt', b'wb')
3458 fp = self.vfs(b'last-message.txt', b'wb')
3458 try:
3459 try:
3459 fp.write(text)
3460 fp.write(text)
3460 finally:
3461 finally:
3461 fp.close()
3462 fp.close()
3462 return self.pathto(fp.name[len(self.root) + 1 :])
3463 return self.pathto(fp.name[len(self.root) + 1 :])
3463
3464
3464 def register_wanted_sidedata(self, category):
3465 def register_wanted_sidedata(self, category):
3465 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3466 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3466 # Only revlogv2 repos can want sidedata.
3467 # Only revlogv2 repos can want sidedata.
3467 return
3468 return
3468 self._wanted_sidedata.add(pycompat.bytestr(category))
3469 self._wanted_sidedata.add(pycompat.bytestr(category))
3469
3470
3470 def register_sidedata_computer(
3471 def register_sidedata_computer(
3471 self, kind, category, keys, computer, flags, replace=False
3472 self, kind, category, keys, computer, flags, replace=False
3472 ):
3473 ):
3473 if kind not in revlogconst.ALL_KINDS:
3474 if kind not in revlogconst.ALL_KINDS:
3474 msg = _(b"unexpected revlog kind '%s'.")
3475 msg = _(b"unexpected revlog kind '%s'.")
3475 raise error.ProgrammingError(msg % kind)
3476 raise error.ProgrammingError(msg % kind)
3476 category = pycompat.bytestr(category)
3477 category = pycompat.bytestr(category)
3477 already_registered = category in self._sidedata_computers.get(kind, [])
3478 already_registered = category in self._sidedata_computers.get(kind, [])
3478 if already_registered and not replace:
3479 if already_registered and not replace:
3479 msg = _(
3480 msg = _(
3480 b"cannot register a sidedata computer twice for category '%s'."
3481 b"cannot register a sidedata computer twice for category '%s'."
3481 )
3482 )
3482 raise error.ProgrammingError(msg % category)
3483 raise error.ProgrammingError(msg % category)
3483 if replace and not already_registered:
3484 if replace and not already_registered:
3484 msg = _(
3485 msg = _(
3485 b"cannot replace a sidedata computer that isn't registered "
3486 b"cannot replace a sidedata computer that isn't registered "
3486 b"for category '%s'."
3487 b"for category '%s'."
3487 )
3488 )
3488 raise error.ProgrammingError(msg % category)
3489 raise error.ProgrammingError(msg % category)
3489 self._sidedata_computers.setdefault(kind, {})
3490 self._sidedata_computers.setdefault(kind, {})
3490 self._sidedata_computers[kind][category] = (keys, computer, flags)
3491 self._sidedata_computers[kind][category] = (keys, computer, flags)
3491
3492
3492
3493
3493 # used to avoid circular references so destructors work
3494 # used to avoid circular references so destructors work
3494 def aftertrans(files):
3495 def aftertrans(files):
3495 renamefiles = [tuple(t) for t in files]
3496 renamefiles = [tuple(t) for t in files]
3496
3497
3497 def a():
3498 def a():
3498 for vfs, src, dest in renamefiles:
3499 for vfs, src, dest in renamefiles:
3499 # if src and dest refer to the same file, vfs.rename is a no-op,
3500 # if src and dest refer to the same file, vfs.rename is a no-op,
3500 # leaving both src and dest on disk. delete dest to make sure
3501 # leaving both src and dest on disk. delete dest to make sure
3501 # the rename couldn't be such a no-op.
3502 # the rename couldn't be such a no-op.
3502 vfs.tryunlink(dest)
3503 vfs.tryunlink(dest)
3503 try:
3504 try:
3504 vfs.rename(src, dest)
3505 vfs.rename(src, dest)
3505 except OSError as exc: # journal file does not yet exist
3506 except OSError as exc: # journal file does not yet exist
3506 if exc.errno != errno.ENOENT:
3507 if exc.errno != errno.ENOENT:
3507 raise
3508 raise
3508
3509
3509 return a
3510 return a
3510
3511
3511
3512
3512 def undoname(fn):
3513 def undoname(fn):
3513 base, name = os.path.split(fn)
3514 base, name = os.path.split(fn)
3514 assert name.startswith(b'journal')
3515 assert name.startswith(b'journal')
3515 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516
3517
3517
3518
3518 def instance(ui, path, create, intents=None, createopts=None):
3519 def instance(ui, path, create, intents=None, createopts=None):
3519 localpath = urlutil.urllocalpath(path)
3520 localpath = urlutil.urllocalpath(path)
3520 if create:
3521 if create:
3521 createrepository(ui, localpath, createopts=createopts)
3522 createrepository(ui, localpath, createopts=createopts)
3522
3523
3523 return makelocalrepository(ui, localpath, intents=intents)
3524 return makelocalrepository(ui, localpath, intents=intents)
3524
3525
3525
3526
3526 def islocal(path):
3527 def islocal(path):
3527 return True
3528 return True
3528
3529
3529
3530
3530 def defaultcreateopts(ui, createopts=None):
3531 def defaultcreateopts(ui, createopts=None):
3531 """Populate the default creation options for a repository.
3532 """Populate the default creation options for a repository.
3532
3533
3533 A dictionary of explicitly requested creation options can be passed
3534 A dictionary of explicitly requested creation options can be passed
3534 in. Missing keys will be populated.
3535 in. Missing keys will be populated.
3535 """
3536 """
3536 createopts = dict(createopts or {})
3537 createopts = dict(createopts or {})
3537
3538
3538 if b'backend' not in createopts:
3539 if b'backend' not in createopts:
3539 # experimental config: storage.new-repo-backend
3540 # experimental config: storage.new-repo-backend
3540 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3541 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3541
3542
3542 return createopts
3543 return createopts
3543
3544
3544
3545
3545 def clone_requirements(ui, createopts, srcrepo):
3546 def clone_requirements(ui, createopts, srcrepo):
3546 """clone the requirements of a local repo for a local clone
3547 """clone the requirements of a local repo for a local clone
3547
3548
3548 The store requirements are unchanged while the working copy requirements
3549 The store requirements are unchanged while the working copy requirements
3549 depend on the configuration
3550 depend on the configuration
3550 """
3551 """
3551 target_requirements = set()
3552 target_requirements = set()
3552 createopts = defaultcreateopts(ui, createopts=createopts)
3553 createopts = defaultcreateopts(ui, createopts=createopts)
3553 for r in newreporequirements(ui, createopts):
3554 for r in newreporequirements(ui, createopts):
3554 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3555 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3555 target_requirements.add(r)
3556 target_requirements.add(r)
3556
3557
3557 for r in srcrepo.requirements:
3558 for r in srcrepo.requirements:
3558 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3559 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3559 target_requirements.add(r)
3560 target_requirements.add(r)
3560 return target_requirements
3561 return target_requirements
3561
3562
3562
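# --- editor's illustrative sketch (not part of this changeset) ---
# A simplified, self-contained model of what clone_requirements() above
# computes: working-copy requirements follow the freshly computed set for
# the new repository, everything else is copied from the source. The
# requirement names below are hypothetical placeholders, not the real
# strings from .hg/requires.

def _sketch_clone_requirements(new_reqs, source_reqs, working_dir_reqs):
    """Return the requirement set a local clone would end up with."""
    target = {r for r in new_reqs if r in working_dir_reqs}
    target |= {r for r in source_reqs if r not in working_dir_reqs}
    return target

# The source uses a working-copy variant the local config does not ask for:
# the clone keeps the source's store format but drops that variant.
assert _sketch_clone_requirements(
    new_reqs={'revlog-backend', 'store-layout'},
    source_reqs={'revlog-backend', 'store-layout', 'wc-variant'},
    working_dir_reqs={'wc-variant'},
) == {'revlog-backend', 'store-layout'}
# --- end of sketch ---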
3563
3563 def newreporequirements(ui, createopts):
3564 def newreporequirements(ui, createopts):
3564 """Determine the set of requirements for a new local repository.
3565 """Determine the set of requirements for a new local repository.
3565
3566
3566 Extensions can wrap this function to specify custom requirements for
3567 Extensions can wrap this function to specify custom requirements for
3567 new repositories.
3568 new repositories.
3568 """
3569 """
3569
3570
3570 if b'backend' not in createopts:
3571 if b'backend' not in createopts:
3571 raise error.ProgrammingError(
3572 raise error.ProgrammingError(
3572 b'backend key not present in createopts; '
3573 b'backend key not present in createopts; '
3573 b'was defaultcreateopts() called?'
3574 b'was defaultcreateopts() called?'
3574 )
3575 )
3575
3576
3576 if createopts[b'backend'] != b'revlogv1':
3577 if createopts[b'backend'] != b'revlogv1':
3577 raise error.Abort(
3578 raise error.Abort(
3578 _(
3579 _(
3579 b'unable to determine repository requirements for '
3580 b'unable to determine repository requirements for '
3580 b'storage backend: %s'
3581 b'storage backend: %s'
3581 )
3582 )
3582 % createopts[b'backend']
3583 % createopts[b'backend']
3583 )
3584 )
3584
3585
3585 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3586 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3586 if ui.configbool(b'format', b'usestore'):
3587 if ui.configbool(b'format', b'usestore'):
3587 requirements.add(requirementsmod.STORE_REQUIREMENT)
3588 requirements.add(requirementsmod.STORE_REQUIREMENT)
3588 if ui.configbool(b'format', b'usefncache'):
3589 if ui.configbool(b'format', b'usefncache'):
3589 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3590 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3590 if ui.configbool(b'format', b'dotencode'):
3591 if ui.configbool(b'format', b'dotencode'):
3591 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3592 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3592
3593
3593 compengines = ui.configlist(b'format', b'revlog-compression')
3594 compengines = ui.configlist(b'format', b'revlog-compression')
3594 for compengine in compengines:
3595 for compengine in compengines:
3595 if compengine in util.compengines:
3596 if compengine in util.compengines:
3596 engine = util.compengines[compengine]
3597 engine = util.compengines[compengine]
3597 if engine.available() and engine.revlogheader():
3598 if engine.available() and engine.revlogheader():
3598 break
3599 break
3599 else:
3600 else:
3600 raise error.Abort(
3601 raise error.Abort(
3601 _(
3602 _(
3602 b'compression engines %s defined by '
3603 b'compression engines %s defined by '
3603 b'format.revlog-compression not available'
3604 b'format.revlog-compression not available'
3604 )
3605 )
3605 % b', '.join(b'"%s"' % e for e in compengines),
3606 % b', '.join(b'"%s"' % e for e in compengines),
3606 hint=_(
3607 hint=_(
3607 b'run "hg debuginstall" to list available '
3608 b'run "hg debuginstall" to list available '
3608 b'compression engines'
3609 b'compression engines'
3609 ),
3610 ),
3610 )
3611 )
3611
3612
3612 # zlib is the historical default and doesn't need an explicit requirement.
3613 # zlib is the historical default and doesn't need an explicit requirement.
3613 if compengine == b'zstd':
3614 if compengine == b'zstd':
3614 requirements.add(b'revlog-compression-zstd')
3615 requirements.add(b'revlog-compression-zstd')
3615 elif compengine != b'zlib':
3616 elif compengine != b'zlib':
3616 requirements.add(b'exp-compression-%s' % compengine)
3617 requirements.add(b'exp-compression-%s' % compengine)
3617
3618
3618 if scmutil.gdinitconfig(ui):
3619 if scmutil.gdinitconfig(ui):
3619 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3620 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3620 if ui.configbool(b'format', b'sparse-revlog'):
3621 if ui.configbool(b'format', b'sparse-revlog'):
3621 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3622 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3622
3623
3623 # experimental config: format.exp-rc-dirstate-v2
3624 # experimental config: format.exp-rc-dirstate-v2
3624 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3625 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3625 if ui.configbool(b'format', b'exp-rc-dirstate-v2'):
3626 if ui.configbool(b'format', b'exp-rc-dirstate-v2'):
3626 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3627 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3627
3628
3628 # experimental config: format.exp-use-copies-side-data-changeset
3629 # experimental config: format.exp-use-copies-side-data-changeset
3629 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3630 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3630 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3631 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3631 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3632 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3632 if ui.configbool(b'experimental', b'treemanifest'):
3633 if ui.configbool(b'experimental', b'treemanifest'):
3633 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3634 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3634
3635
3635 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3636 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3636 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3637 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3637 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3638 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3638
3639
3639 revlogv2 = ui.config(b'experimental', b'revlogv2')
3640 revlogv2 = ui.config(b'experimental', b'revlogv2')
3640 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3641 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3641 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3642 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3642 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3643 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3643 # experimental config: format.internal-phase
3644 # experimental config: format.internal-phase
3644 if ui.configbool(b'format', b'internal-phase'):
3645 if ui.configbool(b'format', b'internal-phase'):
3645 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3646 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3646
3647
3647 if createopts.get(b'narrowfiles'):
3648 if createopts.get(b'narrowfiles'):
3648 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3649 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3649
3650
3650 if createopts.get(b'lfs'):
3651 if createopts.get(b'lfs'):
3651 requirements.add(b'lfs')
3652 requirements.add(b'lfs')
3652
3653
3653 if ui.configbool(b'format', b'bookmarks-in-store'):
3654 if ui.configbool(b'format', b'bookmarks-in-store'):
3654 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3655 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3655
3656
3656 if ui.configbool(b'format', b'use-persistent-nodemap'):
3657 if ui.configbool(b'format', b'use-persistent-nodemap'):
3657 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3658 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3658
3659
3659 # if share-safe is enabled, let's create the new repository with the new
3660 # if share-safe is enabled, let's create the new repository with the new
3660 # requirement
3661 # requirement
3661 if ui.configbool(b'format', b'use-share-safe'):
3662 if ui.configbool(b'format', b'use-share-safe'):
3662 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3663 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3663
3664
3664 # If the repo is being created from a shared repository, we copy
3665 # if we are creating a share-repo¹ we have to handle requirement
3665 # its requirements.
3666 # differently.
3667 #
3668 # [1] (i.e. reusing the store from another repository, just having a
3669 # working copy)
3666 if b'sharedrepo' in createopts:
3670 if b'sharedrepo' in createopts:
3667 requirements = set(createopts[b'sharedrepo'].requirements)
3671 source_requirements = set(createopts[b'sharedrepo'].requirements)
3672
3673 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3674 # share to an old school repository, we have to copy the
3675 # requirements and hope for the best.
3676 requirements = source_requirements
3677 else:
3678 # We have control on the working copy only, so "copy" the non
3679 # working copy part over, ignoring previous logic.
3680 to_drop = set()
3681 for req in requirements:
3682 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3683 continue
3684 if req in source_requirements:
3685 continue
3686 to_drop.add(req)
3687 requirements -= to_drop
3688 requirements |= source_requirements
3689
3668 if createopts.get(b'sharedrelative'):
3690 if createopts.get(b'sharedrelative'):
3669 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3691 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3670 else:
3692 else:
3671 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3693 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3672
3694
3673 return requirements
3674
3675 return requirements
3695 return requirements
3676
3696
3677
3697
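# --- editor's illustrative sketch (not part of this changeset) ---
# Simplified model of the new share-safe branch above: requirements that are
# working-copy specific (or already present in the source) are kept from the
# locally computed set, and all of the source's requirements are adopted on
# top. Requirement names are hypothetical placeholders; the real code also
# adds the 'shared'/'relshared' requirement afterwards.

def _sketch_share_requirements(local_reqs, source_reqs, working_dir_reqs):
    """Return the requirements a new share-safe share would end up with."""
    kept = {
        r
        for r in local_reqs
        if r in working_dir_reqs or r in source_reqs
    }
    return kept | source_reqs

# The share asks for a working-copy variant the source does not use: the
# variant is kept, because it only affects this working copy, while the
# store-level requirements follow the shared source.
assert _sketch_share_requirements(
    local_reqs={'revlog-backend', 'store-layout', 'share-safe', 'wc-variant'},
    source_reqs={'revlog-backend', 'store-layout', 'share-safe'},
    working_dir_reqs={'share-safe', 'wc-variant'},
) == {'revlog-backend', 'store-layout', 'share-safe', 'wc-variant'}
# --- end of sketch ---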
3678 def checkrequirementscompat(ui, requirements):
3698 def checkrequirementscompat(ui, requirements):
3679 """Checks compatibility of repository requirements enabled and disabled.
3699 """Checks compatibility of repository requirements enabled and disabled.
3680
3700
3681 Returns a set of requirements which need to be dropped because dependent
3701 Returns a set of requirements which need to be dropped because dependent
3682 requirements are not enabled. Also warns users about it"""
3702 requirements are not enabled. Also warns users about it"""
3683
3703
3684 dropped = set()
3704 dropped = set()
3685
3705
3686 if requirementsmod.STORE_REQUIREMENT not in requirements:
3706 if requirementsmod.STORE_REQUIREMENT not in requirements:
3687 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3707 if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3688 ui.warn(
3708 ui.warn(
3689 _(
3709 _(
3690 b'ignoring enabled \'format.bookmarks-in-store\' config '
3710 b'ignoring enabled \'format.bookmarks-in-store\' config '
3691 b'because it is incompatible with disabled '
3711 b'because it is incompatible with disabled '
3692 b'\'format.usestore\' config\n'
3712 b'\'format.usestore\' config\n'
3693 )
3713 )
3694 )
3714 )
3695 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3715 dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3696
3716
3697 if (
3717 if (
3698 requirementsmod.SHARED_REQUIREMENT in requirements
3718 requirementsmod.SHARED_REQUIREMENT in requirements
3699 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3719 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3700 ):
3720 ):
3701 raise error.Abort(
3721 raise error.Abort(
3702 _(
3722 _(
3703 b"cannot create shared repository as source was created"
3723 b"cannot create shared repository as source was created"
3704 b" with 'format.usestore' config disabled"
3724 b" with 'format.usestore' config disabled"
3705 )
3725 )
3706 )
3726 )
3707
3727
3708 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3728 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3709 ui.warn(
3729 ui.warn(
3710 _(
3730 _(
3711 b"ignoring enabled 'format.use-share-safe' config because "
3731 b"ignoring enabled 'format.use-share-safe' config because "
3712 b"it is incompatible with disabled 'format.usestore'"
3732 b"it is incompatible with disabled 'format.usestore'"
3713 b" config\n"
3733 b" config\n"
3714 )
3734 )
3715 )
3735 )
3716 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3736 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3717
3737
3718 return dropped
3738 return dropped
3719
3739
3720
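# --- editor's illustrative sketch (not part of this changeset) ---
# Rough model of checkrequirementscompat() above for a repository created
# without the store requirement (format.usestore=false): store-dependent
# requirements are dropped (the real code warns about each one), and sharing
# such a repository is refused. Requirement names are placeholders standing
# in for the real constants.

def _sketch_checkcompat(requirements):
    dropped = set()
    if 'store-layout' not in requirements:
        if requirements & {'shared', 'relshared'}:
            raise RuntimeError('cannot share a repository without a store')
        dropped = {'bookmarks-in-store', 'share-safe'} & requirements
    return dropped

assert _sketch_checkcompat({'revlog-backend', 'share-safe'}) == {'share-safe'}
# --- end of sketch ---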
3740
3721 def filterknowncreateopts(ui, createopts):
3741 def filterknowncreateopts(ui, createopts):
3722 """Filters a dict of repo creation options against options that are known.
3742 """Filters a dict of repo creation options against options that are known.
3723
3743
3724 Receives a dict of repo creation options and returns a dict of those
3744 Receives a dict of repo creation options and returns a dict of those
3725 options that we don't know how to handle.
3745 options that we don't know how to handle.
3726
3746
3727 This function is called as part of repository creation. If the
3747 This function is called as part of repository creation. If the
3728 returned dict contains any items, repository creation will not
3748 returned dict contains any items, repository creation will not
3729 be allowed, as it means there was a request to create a repository
3749 be allowed, as it means there was a request to create a repository
3730 with options not recognized by loaded code.
3750 with options not recognized by loaded code.
3731
3751
3732 Extensions can wrap this function to filter out creation options
3752 Extensions can wrap this function to filter out creation options
3733 they know how to handle.
3753 they know how to handle.
3734 """
3754 """
3735 known = {
3755 known = {
3736 b'backend',
3756 b'backend',
3737 b'lfs',
3757 b'lfs',
3738 b'narrowfiles',
3758 b'narrowfiles',
3739 b'sharedrepo',
3759 b'sharedrepo',
3740 b'sharedrelative',
3760 b'sharedrelative',
3741 b'shareditems',
3761 b'shareditems',
3742 b'shallowfilestore',
3762 b'shallowfilestore',
3743 }
3763 }
3744
3764
3745 return {k: v for k, v in createopts.items() if k not in known}
3765 return {k: v for k, v in createopts.items() if k not in known}
3746
3766
3747
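# --- editor's illustrative sketch (not part of this changeset) ---
# Both filterknowncreateopts() above and newreporequirements() earlier note
# that extensions can wrap them. A minimal sketch of such an extension,
# assuming a made-up creation option b'myfeature' and requirement
# b'exp-myfeature' (both hypothetical):

from mercurial import extensions, localrepo


def _wrap_newreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    if createopts.get(b'myfeature'):
        requirements.add(b'exp-myfeature')
    return requirements


def _wrap_filterknowncreateopts(orig, ui, createopts):
    # claim the option so repository creation is not refused because of it
    unknown = orig(ui, createopts)
    unknown.pop(b'myfeature', None)
    return unknown


def extsetup(ui):
    extensions.wrapfunction(
        localrepo, 'newreporequirements', _wrap_newreporequirements
    )
    extensions.wrapfunction(
        localrepo, 'filterknowncreateopts', _wrap_filterknowncreateopts
    )
# --- end of sketch ---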
3767
3748 def createrepository(ui, path, createopts=None, requirements=None):
3768 def createrepository(ui, path, createopts=None, requirements=None):
3749 """Create a new repository in a vfs.
3769 """Create a new repository in a vfs.
3750
3770
3751 ``path`` path to the new repo's working directory.
3771 ``path`` path to the new repo's working directory.
3752 ``createopts`` options for the new repository.
3772 ``createopts`` options for the new repository.
3753 ``requirements`` predefined set of requirements.
3773 ``requirements`` predefined set of requirements.
3754 (incompatible with ``createopts``)
3774 (incompatible with ``createopts``)
3755
3775
3756 The following keys for ``createopts`` are recognized:
3776 The following keys for ``createopts`` are recognized:
3757
3777
3758 backend
3778 backend
3759 The storage backend to use.
3779 The storage backend to use.
3760 lfs
3780 lfs
3761 Repository will be created with ``lfs`` requirement. The lfs extension
3781 Repository will be created with ``lfs`` requirement. The lfs extension
3762 will automatically be loaded when the repository is accessed.
3782 will automatically be loaded when the repository is accessed.
3763 narrowfiles
3783 narrowfiles
3764 Set up repository to support narrow file storage.
3784 Set up repository to support narrow file storage.
3765 sharedrepo
3785 sharedrepo
3766 Repository object from which storage should be shared.
3786 Repository object from which storage should be shared.
3767 sharedrelative
3787 sharedrelative
3768 Boolean indicating if the path to the shared repo should be
3788 Boolean indicating if the path to the shared repo should be
3769 stored as relative. By default, the pointer to the "parent" repo
3789 stored as relative. By default, the pointer to the "parent" repo
3770 is stored as an absolute path.
3790 is stored as an absolute path.
3771 shareditems
3791 shareditems
3772 Set of items to share to the new repository (in addition to storage).
3792 Set of items to share to the new repository (in addition to storage).
3773 shallowfilestore
3793 shallowfilestore
3774 Indicates that storage for files should be shallow (not all ancestor
3794 Indicates that storage for files should be shallow (not all ancestor
3775 revisions are known).
3795 revisions are known).
3776 """
3796 """
3777
3797
3778 if requirements is not None:
3798 if requirements is not None:
3779 if createopts is not None:
3799 if createopts is not None:
3780 msg = b'cannot specify both createopts and requirements'
3800 msg = b'cannot specify both createopts and requirements'
3781 raise error.ProgrammingError(msg)
3801 raise error.ProgrammingError(msg)
3782 createopts = {}
3802 createopts = {}
3783 else:
3803 else:
3784 createopts = defaultcreateopts(ui, createopts=createopts)
3804 createopts = defaultcreateopts(ui, createopts=createopts)
3785
3805
3786 unknownopts = filterknowncreateopts(ui, createopts)
3806 unknownopts = filterknowncreateopts(ui, createopts)
3787
3807
3788 if not isinstance(unknownopts, dict):
3808 if not isinstance(unknownopts, dict):
3789 raise error.ProgrammingError(
3809 raise error.ProgrammingError(
3790 b'filterknowncreateopts() did not return a dict'
3810 b'filterknowncreateopts() did not return a dict'
3791 )
3811 )
3792
3812
3793 if unknownopts:
3813 if unknownopts:
3794 raise error.Abort(
3814 raise error.Abort(
3795 _(
3815 _(
3796 b'unable to create repository because of unknown '
3816 b'unable to create repository because of unknown '
3797 b'creation option: %s'
3817 b'creation option: %s'
3798 )
3818 )
3799 % b', '.join(sorted(unknownopts)),
3819 % b', '.join(sorted(unknownopts)),
3800 hint=_(b'is a required extension not loaded?'),
3820 hint=_(b'is a required extension not loaded?'),
3801 )
3821 )
3802
3822
3803 requirements = newreporequirements(ui, createopts=createopts)
3823 requirements = newreporequirements(ui, createopts=createopts)
3804 requirements -= checkrequirementscompat(ui, requirements)
3824 requirements -= checkrequirementscompat(ui, requirements)
3805
3825
3806 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3826 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3807
3827
3808 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3828 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3809 if hgvfs.exists():
3829 if hgvfs.exists():
3810 raise error.RepoError(_(b'repository %s already exists') % path)
3830 raise error.RepoError(_(b'repository %s already exists') % path)
3811
3831
3812 if b'sharedrepo' in createopts:
3832 if b'sharedrepo' in createopts:
3813 sharedpath = createopts[b'sharedrepo'].sharedpath
3833 sharedpath = createopts[b'sharedrepo'].sharedpath
3814
3834
3815 if createopts.get(b'sharedrelative'):
3835 if createopts.get(b'sharedrelative'):
3816 try:
3836 try:
3817 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3837 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3818 sharedpath = util.pconvert(sharedpath)
3838 sharedpath = util.pconvert(sharedpath)
3819 except (IOError, ValueError) as e:
3839 except (IOError, ValueError) as e:
3820 # ValueError is raised on Windows if the drive letters differ
3840 # ValueError is raised on Windows if the drive letters differ
3821 # on each path.
3841 # on each path.
3822 raise error.Abort(
3842 raise error.Abort(
3823 _(b'cannot calculate relative path'),
3843 _(b'cannot calculate relative path'),
3824 hint=stringutil.forcebytestr(e),
3844 hint=stringutil.forcebytestr(e),
3825 )
3845 )
3826
3846
3827 if not wdirvfs.exists():
3847 if not wdirvfs.exists():
3828 wdirvfs.makedirs()
3848 wdirvfs.makedirs()
3829
3849
3830 hgvfs.makedir(notindexed=True)
3850 hgvfs.makedir(notindexed=True)
3831 if b'sharedrepo' not in createopts:
3851 if b'sharedrepo' not in createopts:
3832 hgvfs.mkdir(b'cache')
3852 hgvfs.mkdir(b'cache')
3833 hgvfs.mkdir(b'wcache')
3853 hgvfs.mkdir(b'wcache')
3834
3854
3835 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3855 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3836 if has_store and b'sharedrepo' not in createopts:
3856 if has_store and b'sharedrepo' not in createopts:
3837 hgvfs.mkdir(b'store')
3857 hgvfs.mkdir(b'store')
3838
3858
3839 # We create an invalid changelog outside the store so very old
3859 # We create an invalid changelog outside the store so very old
3840 # Mercurial versions (which didn't know about the requirements
3860 # Mercurial versions (which didn't know about the requirements
3841 # file) encounter an error on reading the changelog. This
3861 # file) encounter an error on reading the changelog. This
3842 # effectively locks out old clients and prevents them from
3862 # effectively locks out old clients and prevents them from
3843 # mucking with a repo in an unknown format.
3863 # mucking with a repo in an unknown format.
3844 #
3864 #
3845 # The revlog header has version 65535, which won't be recognized by
3865 # The revlog header has version 65535, which won't be recognized by
3846 # such old clients.
3866 # such old clients.
3847 hgvfs.append(
3867 hgvfs.append(
3848 b'00changelog.i',
3868 b'00changelog.i',
3849 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3869 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3850 b'layout',
3870 b'layout',
3851 )
3871 )
3852
3872
3853 # Filter the requirements into working copy and store ones
3873 # Filter the requirements into working copy and store ones
3854 wcreq, storereq = scmutil.filterrequirements(requirements)
3874 wcreq, storereq = scmutil.filterrequirements(requirements)
3855 # write working copy ones
3875 # write working copy ones
3856 scmutil.writerequires(hgvfs, wcreq)
3876 scmutil.writerequires(hgvfs, wcreq)
3857 # If there are store requirements and the current repository
3877 # If there are store requirements and the current repository
3858 # is not a shared one, write stored requirements
3878 # is not a shared one, write stored requirements
3859 # For a new shared repository, we don't need to write the store
3879 # For a new shared repository, we don't need to write the store
3860 # requirements as they are already present in store requires
3880 # requirements as they are already present in store requires
3861 if storereq and b'sharedrepo' not in createopts:
3881 if storereq and b'sharedrepo' not in createopts:
3862 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3882 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3863 scmutil.writerequires(storevfs, storereq)
3883 scmutil.writerequires(storevfs, storereq)
3864
3884
3865 # Write out file telling readers where to find the shared store.
3885 # Write out file telling readers where to find the shared store.
3866 if b'sharedrepo' in createopts:
3886 if b'sharedrepo' in createopts:
3867 hgvfs.write(b'sharedpath', sharedpath)
3887 hgvfs.write(b'sharedpath', sharedpath)
3868
3888
3869 if createopts.get(b'shareditems'):
3889 if createopts.get(b'shareditems'):
3870 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3890 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3871 hgvfs.write(b'shared', shared)
3891 hgvfs.write(b'shared', shared)
3872
3892
3873
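# --- editor's illustrative sketch (not part of this changeset) ---
# Minimal in-process usage of createrepository() above, assuming a plain ui
# instance and a couple of the documented createopts keys. The path and the
# b'lfs' value are arbitrary examples.

from mercurial import localrepo, ui as uimod


def make_repo(path=b'/tmp/newrepo'):
    ui = uimod.ui.load()  # load user and system configuration
    localrepo.createrepository(
        ui,
        path,
        createopts={b'lfs': True, b'narrowfiles': False},
    )
    # open the freshly created repository
    return localrepo.instance(ui, path, create=False)
# --- end of sketch ---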
3893
3874 def poisonrepository(repo):
3894 def poisonrepository(repo):
3875 """Poison a repository instance so it can no longer be used."""
3895 """Poison a repository instance so it can no longer be used."""
3876 # Perform any cleanup on the instance.
3896 # Perform any cleanup on the instance.
3877 repo.close()
3897 repo.close()
3878
3898
3879 # Our strategy is to replace the type of the object with one that
3899 # Our strategy is to replace the type of the object with one that
3880 # has all attribute lookups result in error.
3900 # has all attribute lookups result in error.
3881 #
3901 #
3882 # But we have to allow the close() method because some constructors
3902 # But we have to allow the close() method because some constructors
3883 # of repos call close() on repo references.
3903 # of repos call close() on repo references.
3884 class poisonedrepository(object):
3904 class poisonedrepository(object):
3885 def __getattribute__(self, item):
3905 def __getattribute__(self, item):
3886 if item == 'close':
3906 if item == 'close':
3887 return object.__getattribute__(self, item)
3907 return object.__getattribute__(self, item)
3888
3908
3889 raise error.ProgrammingError(
3909 raise error.ProgrammingError(
3890 b'repo instances should not be used after unshare'
3910 b'repo instances should not be used after unshare'
3891 )
3911 )
3892
3912
3893 def close(self):
3913 def close(self):
3894 pass
3914 pass
3895
3915
3896 # We may have a repoview, which intercepts __setattr__. So be sure
3916 # We may have a repoview, which intercepts __setattr__. So be sure
3897 # we operate at the lowest level possible.
3917 # we operate at the lowest level possible.
3898 object.__setattr__(repo, '__class__', poisonedrepository)
3918 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,286 +1,308
1 #testcases safe normal
1 #testcases safe normal
2
2
3 #if safe
3 #if safe
4 $ echo "[format]" >> $HGRCPATH
4 $ echo "[format]" >> $HGRCPATH
5 $ echo "use-share-safe = True" >> $HGRCPATH
5 $ echo "use-share-safe = True" >> $HGRCPATH
6 #endif
6 #endif
7
7
8 $ echo "[extensions]" >> $HGRCPATH
8 $ echo "[extensions]" >> $HGRCPATH
9 $ echo "share = " >> $HGRCPATH
9 $ echo "share = " >> $HGRCPATH
10
10
11 prepare repo1
11 prepare repo1
12
12
13 $ hg init repo1
13 $ hg init repo1
14 $ cd repo1
14 $ cd repo1
15 $ echo a > a
15 $ echo a > a
16 $ hg commit -A -m'init'
16 $ hg commit -A -m'init'
17 adding a
17 adding a
18
18
19 share it
19 share it
20
20
21 $ cd ..
21 $ cd ..
22 $ hg share repo1 repo2
22 $ hg share repo1 repo2
23 updating working directory
23 updating working directory
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
25
25
26 share shouldn't have a store dir
26 share shouldn't have a store dir
27
27
28 $ cd repo2
28 $ cd repo2
29 $ test -d .hg/store
29 $ test -d .hg/store
30 [1]
30 [1]
31 $ hg root -Tjson | sed 's|\\\\|\\|g'
31 $ hg root -Tjson | sed 's|\\\\|\\|g'
32 [
32 [
33 {
33 {
34 "hgpath": "$TESTTMP/repo2/.hg",
34 "hgpath": "$TESTTMP/repo2/.hg",
35 "reporoot": "$TESTTMP/repo2",
35 "reporoot": "$TESTTMP/repo2",
36 "storepath": "$TESTTMP/repo1/.hg/store"
36 "storepath": "$TESTTMP/repo1/.hg/store"
37 }
37 }
38 ]
38 ]
39
39
40 share shouldn't have a full cache dir, original repo should
40 share shouldn't have a full cache dir, original repo should
41
41
42 $ hg branches
42 $ hg branches
43 default 0:d3873e73d99e
43 default 0:d3873e73d99e
44 $ hg tags
44 $ hg tags
45 tip 0:d3873e73d99e
45 tip 0:d3873e73d99e
46 $ test -d .hg/cache
46 $ test -d .hg/cache
47 [1]
47 [1]
48 $ ls -1 .hg/wcache || true
48 $ ls -1 .hg/wcache || true
49 checkisexec (execbit !)
49 checkisexec (execbit !)
50 checklink (symlink no-rust !)
50 checklink (symlink no-rust !)
51 checklink-target (symlink no-rust !)
51 checklink-target (symlink no-rust !)
52 manifestfulltextcache (reporevlogstore !)
52 manifestfulltextcache (reporevlogstore !)
53 $ ls -1 ../repo1/.hg/cache
53 $ ls -1 ../repo1/.hg/cache
54 branch2-served
54 branch2-served
55 rbc-names-v1
55 rbc-names-v1
56 rbc-revs-v1
56 rbc-revs-v1
57 tags2-visible
57 tags2-visible
58
58
59 Cloning a shared repo should pick up the full cache dir on the other hand.
59 Cloning a shared repo should pick up the full cache dir on the other hand.
60
60
61 $ hg clone . ../repo2-clone
61 $ hg clone . ../repo2-clone
62 updating to branch default
62 updating to branch default
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
64 $ ls -1 ../repo2-clone/.hg/cache
64 $ ls -1 ../repo2-clone/.hg/cache
65 branch2-base
65 branch2-base
66 branch2-immutable
66 branch2-immutable
67 branch2-served
67 branch2-served
68 branch2-served.hidden
68 branch2-served.hidden
69 branch2-visible
69 branch2-visible
70 branch2-visible-hidden
70 branch2-visible-hidden
71 rbc-names-v1
71 rbc-names-v1
72 rbc-revs-v1
72 rbc-revs-v1
73 tags2
73 tags2
74 tags2-served
74 tags2-served
75 tags2-visible
75 tags2-visible
76
76
77 Some sed versions append a newline, some don't, and some just fail
77 Some sed versions append a newline, some don't, and some just fail
78
78
79 $ cat .hg/sharedpath; echo
79 $ cat .hg/sharedpath; echo
80 $TESTTMP/repo1/.hg
80 $TESTTMP/repo1/.hg
81
81
82 trailing newline on .hg/sharedpath is ok
82 trailing newline on .hg/sharedpath is ok
83 $ hg tip -q
83 $ hg tip -q
84 0:d3873e73d99e
84 0:d3873e73d99e
85 $ echo '' >> .hg/sharedpath
85 $ echo '' >> .hg/sharedpath
86 $ cat .hg/sharedpath
86 $ cat .hg/sharedpath
87 $TESTTMP/repo1/.hg
87 $TESTTMP/repo1/.hg
88 $ hg tip -q
88 $ hg tip -q
89 0:d3873e73d99e
89 0:d3873e73d99e
90
90
91 commit in shared clone
91 commit in shared clone
92
92
93 $ echo a >> a
93 $ echo a >> a
94 $ hg commit -m'change in shared clone'
94 $ hg commit -m'change in shared clone'
95
95
96 check original
96 check original
97
97
98 $ cd ../repo1
98 $ cd ../repo1
99 $ hg log
99 $ hg log
100 changeset: 1:8af4dc49db9e
100 changeset: 1:8af4dc49db9e
101 tag: tip
101 tag: tip
102 user: test
102 user: test
103 date: Thu Jan 01 00:00:00 1970 +0000
103 date: Thu Jan 01 00:00:00 1970 +0000
104 summary: change in shared clone
104 summary: change in shared clone
105
105
106 changeset: 0:d3873e73d99e
106 changeset: 0:d3873e73d99e
107 user: test
107 user: test
108 date: Thu Jan 01 00:00:00 1970 +0000
108 date: Thu Jan 01 00:00:00 1970 +0000
109 summary: init
109 summary: init
110
110
111 $ hg update
111 $ hg update
112 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 $ cat a # should be two lines of "a"
113 $ cat a # should be two lines of "a"
114 a
114 a
115 a
115 a
116
116
117 commit in original
117 commit in original
118
118
119 $ echo b > b
119 $ echo b > b
120 $ hg commit -A -m'another file'
120 $ hg commit -A -m'another file'
121 adding b
121 adding b
122
122
123 check in shared clone
123 check in shared clone
124
124
125 $ cd ../repo2
125 $ cd ../repo2
126 $ hg log
126 $ hg log
127 changeset: 2:c2e0ac586386
127 changeset: 2:c2e0ac586386
128 tag: tip
128 tag: tip
129 user: test
129 user: test
130 date: Thu Jan 01 00:00:00 1970 +0000
130 date: Thu Jan 01 00:00:00 1970 +0000
131 summary: another file
131 summary: another file
132
132
133 changeset: 1:8af4dc49db9e
133 changeset: 1:8af4dc49db9e
134 user: test
134 user: test
135 date: Thu Jan 01 00:00:00 1970 +0000
135 date: Thu Jan 01 00:00:00 1970 +0000
136 summary: change in shared clone
136 summary: change in shared clone
137
137
138 changeset: 0:d3873e73d99e
138 changeset: 0:d3873e73d99e
139 user: test
139 user: test
140 date: Thu Jan 01 00:00:00 1970 +0000
140 date: Thu Jan 01 00:00:00 1970 +0000
141 summary: init
141 summary: init
142
142
143 $ hg update
143 $ hg update
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
144 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
145 $ cat b # should exist with one "b"
145 $ cat b # should exist with one "b"
146 b
146 b
147
147
148 hg serve shared clone
148 hg serve shared clone
149
149
150 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
150 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
151 $ cat hg.pid >> $DAEMON_PIDS
151 $ cat hg.pid >> $DAEMON_PIDS
152 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
152 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
153 200 Script output follows
153 200 Script output follows
154
154
155
155
156 -rw-r--r-- 4 a
156 -rw-r--r-- 4 a
157 -rw-r--r-- 2 b
157 -rw-r--r-- 2 b
158
158
159
159
160 Cloning a shared repo via bundle2 results in a non-shared clone
160 Cloning a shared repo via bundle2 results in a non-shared clone
161
161
162 $ cd ..
162 $ cd ..
163 $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
163 $ hg clone -q --stream ssh://user@dummy/`pwd`/repo2 cloned-via-bundle2
164 $ hg -R cloned-via-bundle2 debugrequires | grep "shared"
164 $ hg -R cloned-via-bundle2 debugrequires | grep "shared"
165 [1]
165 [1]
166 $ hg id --cwd cloned-via-bundle2 -r tip
166 $ hg id --cwd cloned-via-bundle2 -r tip
167 c2e0ac586386 tip
167 c2e0ac586386 tip
168 $ cd repo2
168 $ cd repo2
169
169
170 test unshare command
170 test unshare command
171
171
172 $ hg unshare
172 $ hg unshare
173 $ test -d .hg/store
173 $ test -d .hg/store
174 $ test -f .hg/sharedpath
174 $ test -f .hg/sharedpath
175 [1]
175 [1]
176 $ grep shared .hg/requires
176 $ grep shared .hg/requires
177 [1]
177 [1]
178 $ hg unshare
178 $ hg unshare
179 abort: this is not a shared repo
179 abort: this is not a shared repo
180 [255]
180 [255]
181
181
182 check that a change does not propagate
182 check that a change does not propagate
183
183
184 $ echo b >> b
184 $ echo b >> b
185 $ hg commit -m'change in unshared'
185 $ hg commit -m'change in unshared'
186 $ cd ../repo1
186 $ cd ../repo1
187 $ hg id -r tip
187 $ hg id -r tip
188 c2e0ac586386 tip
188 c2e0ac586386 tip
189
189
190 $ cd ..
190 $ cd ..
191
191
192
192
193 non largefiles repos won't enable largefiles
193 non largefiles repos won't enable largefiles
194
194
195 $ hg share --config extensions.largefiles= repo2 sharedrepo
195 $ hg share --config extensions.largefiles= repo2 sharedrepo
196 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
196 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
197 The fsmonitor extension is incompatible with the largefiles extension and has been disabled. (fsmonitor !)
198 updating working directory
198 updating working directory
199 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 $ [ -f sharedrepo/.hg/hgrc ]
200 $ [ -f sharedrepo/.hg/hgrc ]
201 [1]
201 [1]
202
202
203 test shared clones using relative paths work
203 test shared clones using relative paths work
204
204
205 $ mkdir thisdir
205 $ mkdir thisdir
206 $ hg init thisdir/orig
206 $ hg init thisdir/orig
207 $ hg share -U thisdir/orig thisdir/abs
207 $ hg share -U thisdir/orig thisdir/abs
208 $ hg share -U --relative thisdir/abs thisdir/rel
208 $ hg share -U --relative thisdir/abs thisdir/rel
209 $ cat thisdir/rel/.hg/sharedpath
209 $ cat thisdir/rel/.hg/sharedpath
210 ../../orig/.hg (no-eol)
210 ../../orig/.hg (no-eol)
211 $ grep shared thisdir/*/.hg/requires
211 $ grep shared thisdir/*/.hg/requires
212 thisdir/abs/.hg/requires:shared
212 thisdir/abs/.hg/requires:shared
213 thisdir/rel/.hg/requires:relshared
213 thisdir/rel/.hg/requires:relshared
214 thisdir/rel/.hg/requires:shared
214 thisdir/rel/.hg/requires:shared
215
215
216 test that relative shared paths aren't relative to $PWD
216 test that relative shared paths aren't relative to $PWD
217
217
218 $ cd thisdir
218 $ cd thisdir
219 $ hg -R rel root
219 $ hg -R rel root
220 $TESTTMP/thisdir/rel
220 $TESTTMP/thisdir/rel
221 $ cd ..
221 $ cd ..
222
222
223 now test that relative paths really are relative, survive across
223 now test that relative paths really are relative, survive across
224 renames and changes of PWD
224 renames and changes of PWD
225
225
226 $ hg -R thisdir/abs root
226 $ hg -R thisdir/abs root
227 $TESTTMP/thisdir/abs
227 $TESTTMP/thisdir/abs
228 $ hg -R thisdir/rel root
228 $ hg -R thisdir/rel root
229 $TESTTMP/thisdir/rel
229 $TESTTMP/thisdir/rel
230 $ mv thisdir thatdir
230 $ mv thisdir thatdir
231 $ hg -R thatdir/abs root
231 $ hg -R thatdir/abs root
232 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
232 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg
233 [255]
233 [255]
234 $ hg -R thatdir/rel root
234 $ hg -R thatdir/rel root
235 $TESTTMP/thatdir/rel
235 $TESTTMP/thatdir/rel
236
236
237 test unshare relshared repo
237 test unshare relshared repo
238
238
239 $ cd thatdir/rel
239 $ cd thatdir/rel
240 $ hg unshare
240 $ hg unshare
241 $ test -d .hg/store
241 $ test -d .hg/store
242 $ test -f .hg/sharedpath
242 $ test -f .hg/sharedpath
243 [1]
243 [1]
244 $ grep shared .hg/requires
244 $ grep shared .hg/requires
245 [1]
245 [1]
246 $ hg unshare
246 $ hg unshare
247 abort: this is not a shared repo
247 abort: this is not a shared repo
248 [255]
248 [255]
249 $ cd ../..
249 $ cd ../..
250
250
251 $ rm -r thatdir
251 $ rm -r thatdir
252
252
253 Demonstrate buggy behavior around requirements validation
253 Demonstrate buggy behavior around requirements validation
254 See comment in localrepo.py:makelocalrepository() for more.
254 See comment in localrepo.py:makelocalrepository() for more.
255
255
256 $ hg init sharenewrequires
256 $ hg init sharenewrequires
257 $ hg share sharenewrequires shareoldrequires
257 $ hg share sharenewrequires shareoldrequires
258 updating working directory
258 updating working directory
259 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
259 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
260
260
261 $ cat >> sharenewrequires/.hg/requires << EOF
261 $ cat >> sharenewrequires/.hg/requires << EOF
262 > missing-requirement
262 > missing-requirement
263 > EOF
263 > EOF
264
264
265 We cannot open the repo with the unknown requirement
265 We cannot open the repo with the unknown requirement
266
266
267 $ hg -R sharenewrequires status
267 $ hg -R sharenewrequires status
268 abort: repository requires features unknown to this Mercurial: missing-requirement
268 abort: repository requires features unknown to this Mercurial: missing-requirement
269 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
269 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
270 [255]
270 [255]
271
271
272 BUG: we don't get the same error when opening the shared repo pointing to it
272 BUG: we don't get the same error when opening the shared repo pointing to it
273
273
274 $ hg -R shareoldrequires status
274 $ hg -R shareoldrequires status
275
275
276 Explicitly kill daemons to let the test exit on Windows
276 Explicitly kill daemons to let the test exit on Windows
277
277
278 $ killdaemons.py
278 $ killdaemons.py
279
279
280 Test sharing a repository which was created with the store requirement disabled
280 Test sharing a repository which was created with the store requirement disabled
281
281
282 $ hg init nostore --config format.usestore=false
282 $ hg init nostore --config format.usestore=false
283 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
283 ignoring enabled 'format.use-share-safe' config because it is incompatible with disabled 'format.usestore' config (safe !)
284 $ hg share nostore sharednostore
284 $ hg share nostore sharednostore
285 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
285 abort: cannot create shared repository as source was created with 'format.usestore' config disabled
286 [255]
286 [255]
287
288 Check that (safe) share can control wc-specific format variant at creation time
289 -------------------------------------------------------------------------------
290
291 #if no-rust
292
293 $ cat << EOF >> $HGRCPATH
294 > [storage]
295 > dirstate-v2.slow-path = allow
296 > EOF
297
298 #endif
299
300 $ hg init repo-safe-d1 --config format.use-share-safe=yes --config format.exp-rc-dirstate-v2=no
301 $ hg debugformat -R repo-safe-d1 | grep dirstate-v2
302 dirstate-v2: no
303
304 $ hg share repo-safe-d1 share-safe-d2 --config format.use-share-safe=yes --config format.exp-rc-dirstate-v2=yes
305 updating working directory
306 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
307 $ hg debugformat -R share-safe-d2 | grep dirstate-v2
308 dirstate-v2: yes