localrepo: purge filecache attribute using their unicode name...
marmoute
r51812:b3174be5 default
@@ -1,4043 +1,4047 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import re
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    policy,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

RE_SKIP_DIRSTATE_ROLLBACK = re.compile(
    b"^((dirstate|narrowspec.dirstate).*|branch$)"
)

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

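# Illustrative sketch (assumption, not part of upstream localrepo.py): how
# the descriptors above are declared. A property wrapped in ``repofilecache``
# or ``storecache`` is computed once and recomputed only when the stat data
# of the named file changes on disk. ``examplerepo`` is a stand-in; the real
# users are properties on ``localrepository`` itself.


def _sketch_filecache_declaration():
    class examplerepo:
        def unfiltered(self):
            return self

        @repofilecache(b'bookmarks')
        def _bookmarks(self):
            # re-read .hg/bookmarks only when its stat entry changes
            return bookmarks.bmstore(self)

    return examplerepo
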
class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

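# Illustrative sketch (assumption, not part of upstream localrepo.py):
# querying the filecache without triggering an expensive load. The key is the
# property name; per the commit above, filecache attributes are tracked by
# their unicode (str) name, so 'changelog' rather than b'changelog' is
# assumed here.


def _sketch_isfilecached(repo):
    cl, cached = isfilecached(repo, 'changelog')
    if cached:
        # the changelog is already loaded; cheap to inspect
        return len(cl)
    return None
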
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

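# Illustrative sketch (assumption, not part of upstream localrepo.py):
# ``unfilteredmethod`` marks operations that must see every revision,
# including changesets hidden by the current repoview filter. ``examplerepo``
# is a stand-in; real users are methods on ``localrepository``.


def _sketch_unfilteredmethod():
    class examplerepo:
        filtername = None

        def unfiltered(self):
            return self  # a real filtered repo returns its unfiltered view

        @unfilteredmethod
        def destroying(self):
            # inside, ``self`` is always the unfiltered repository
            return self.filtername

    return examplerepo().destroying()
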
moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

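# Illustrative sketch (assumption, not part of upstream localrepo.py): the
# executor implements the generic peer command API. Callers queue commands
# and read results from futures; for a local peer each future is already
# resolved by the time callcommand() returns. ``b'heads'`` is a real peer
# command; the helper name is hypothetical.


def _sketch_commandexecutor(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'heads', {})
        e.sendcommands()
    # the future was resolved synchronously for a local peer
    return f.result()
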
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None, remotehidden=False):
        super(localpeer, self).__init__(
            repo.ui, path=path, remotehidden=remotehidden
        )

        if caps is None:
            caps = moderncaps.copy()
        if remotehidden:
            self._repo = repo.filtered(b'served.hidden')
        else:
            self._repo = repo.filtered(b'served')
        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def get_cached_bundle_inline(self, path):
        # not needed with local peer
        raise NotImplementedError

    def clonebundles(self):
        return bundlecaches.get_manifest(self._repo)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None, remotehidden=False):
        super(locallegacypeer, self).__init__(
            repo, caps=legacycaps, path=path, remotehidden=remotehidden
        )

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

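# Illustrative sketch (assumption, not part of upstream localrepo.py): how an
# extension advertises support for a custom requirement. Registration is shown
# commented out because only real extensions should add entries to the set
# above; the function and requirement names are hypothetical.


def _sketch_featuresetup(ui, supported):
    # advertise a hypothetical requirement this extension can handle
    supported.add(b'exp-example-requirement')


# from an extension module, typically in uisetup(ui):
#     localrepo.featuresetupfuncs.add(_sketch_featuresetup)
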
def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to the root of the shared source
    repo for a shared repository

    hgvfs is the vfs pointing at .hg/ of the current repo (the shared one)
    requirements is a set of requirements of the current repo (the shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())

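# Illustrative sketch (assumption, not part of upstream localrepo.py):
# ``.hg/requires`` is a plain newline-delimited list, e.g.
# b"generaldelta\nrevlogv1\nstore\n", so reading it reduces to a set of
# lines. The helper name below is hypothetical.


def _sketch_readrequires(hgvfs):
    # allowmissing=True tolerates pre-0.9.2 repositories that lack the file
    requirements = _readrequires(hgvfs, True)
    return b'store' in requirements
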
def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to the shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that the store
    # requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
    # if store is not present; refer to checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates the current
    # repository is a share and the store exists in the path mentioned in
    # `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced a race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The wcache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

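# Illustrative sketch (assumption, not part of upstream localrepo.py):
# callers normally reach makelocalrepository() through ``mercurial.hg``
# rather than invoking it directly; note that the path argument is bytes.


def _sketch_open_repository(path=b'.'):
    from mercurial import hg
    from mercurial import ui as uimod

    repo = hg.repository(uimod.ui.load(), path)
    # the returned object is an instance of the dynamically derived
    # ``derivedrepo:...`` type assembled from the REPO_INTERFACES factories
    return repo
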
def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is the vfs object pointing to the source repo if the current
    one is a shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from the shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret

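# Illustrative sketch (assumption, not part of upstream localrepo.py): the
# docstring above invites extensions to monkeypatch loadhgrc(). The usual
# idiom is extensions.wrapfunction(); the wrapper and the extra config file
# name below are hypothetical.


def _sketch_wrap_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    # pull in an extra, hypothetical config file next to .hg/hgrc
    try:
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret


# from an extension module, typically in uisetup(ui):
#     extensions.wrapfunction(localrepo, 'loadhgrc', _sketch_wrap_loadhgrc)
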
910 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
910 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
911 """Perform additional actions after .hg/hgrc is loaded.
911 """Perform additional actions after .hg/hgrc is loaded.
912
912
913 This function is called during repository loading immediately after
913 This function is called during repository loading immediately after
914 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
914 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
915
915
916 The function can be used to validate configs, automatically add
916 The function can be used to validate configs, automatically add
917 options (including extensions) based on requirements, etc.
917 options (including extensions) based on requirements, etc.
918 """
918 """
919
919
920 # Map of requirements to list of extensions to load automatically when
920 # Map of requirements to list of extensions to load automatically when
921 # requirement is present.
921 # requirement is present.
922 autoextensions = {
922 autoextensions = {
923 b'git': [b'git'],
923 b'git': [b'git'],
924 b'largefiles': [b'largefiles'],
924 b'largefiles': [b'largefiles'],
925 b'lfs': [b'lfs'],
925 b'lfs': [b'lfs'],
926 }
926 }
927
927
928 for requirement, names in sorted(autoextensions.items()):
928 for requirement, names in sorted(autoextensions.items()):
929 if requirement not in requirements:
929 if requirement not in requirements:
930 continue
930 continue
931
931
932 for name in names:
932 for name in names:
933 if not ui.hasconfig(b'extensions', name):
933 if not ui.hasconfig(b'extensions', name):
934 ui.setconfig(b'extensions', name, b'', source=b'autoload')
934 ui.setconfig(b'extensions', name, b'', source=b'autoload')
935
935
936
936
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)
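    # Extensions register such hooks at import time, typically with
    # ``localrepo.featuresetupfuncs.add(featuresetup)``; the module check
    # above ensures a hook only runs when the extension providing it is
    # enabled for this ui instance.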

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.

    Returns ``None`` on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if (
        requirementsmod.SPARSE_REQUIREMENT in requirements
        and not sparse.enabled
    ):
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if requirementsmod.STORE_REQUIREMENT in requirements:
        if requirementsmod.FNCACHE_REQUIREMENT in requirements:
            dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
            return storemod.fncachestore(path, vfstype, dotencode)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if (
        requirementsmod.REVLOGV1_REQUIREMENT in requirements
        or requirementsmod.REVLOGV2_REQUIREMENT in requirements
    ):
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
        options[b'revlogv1'] = True
    if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True
    if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
        options[b'changelogv2'] = True
        cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
        options[b'changelogv2.compute-rank'] = cmp_rank

    if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents
    dps_cgds = ui.configint(
        b'storage',
        b'revlog.delta-parent-search.candidate-group-chunk-size',
    )
    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
    options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')

    issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
    options[b'issue6528.fix-incoming'] = issue6528

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
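            # maxsplit=2 keeps everything after the second dash, e.g.
            # b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'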

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if requirementsmod.NODEMAP_REQUIREMENT in requirements:
        slow_path = ui.config(
            b'storage', b'revlog.persistent-nodemap.slow-path'
        )
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(
                b'storage', b'revlog.persistent-nodemap.slow-path'
            )
            msg = _(
                b'unknown value for config '
                b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
            )
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `persistent-nodemap` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-persistent-nodemap` "
            b"for details"
        )
        if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
        options[b'persistent-nodemap'] = True
    if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
        slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
        if slow_path not in (b'allow', b'warn', b'abort'):
            default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
            msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
            ui.warn(msg % slow_path)
            if not ui.quiet:
                ui.warn(_(b'falling back to default value: %s\n') % default)
            slow_path = default

        msg = _(
            b"accessing `dirstate-v2` repository without associated "
            b"fast implementation."
        )
        hint = _(
            b"check `hg help config.format.use-dirstate-v2` " b"for details"
        )
        if not dirstate.HAS_FAST_DIRSTATE_V2:
            if slow_path == b'warn':
                msg = b"warning: " + msg + b'\n'
                ui.warn(msg)
                if not ui.quiet:
                    hint = b'(' + hint + b')\n'
                    ui.warn(hint)
            if slow_path == b'abort':
                raise error.Abort(msg, hint=hint)
    if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
        options[b'persistent-nodemap.mmap'] = True
    if ui.configbool(b'devel', b'persistent-nodemap'):
        options[b'devel-force-nodemap'] = True

    return options


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage:
    """File storage when using revlogs."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )

        return filelog.filelog(self.svfs, path, try_split=try_split)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage:
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        try_split = (
            self.currenttransaction() is not None
            or txnutil.mayhavepending(self.root)
        )
        return filelog.narrowfilelog(
            self.svfs, path, self._storenarrowmatch, try_split=try_split
        )


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if requirementsmod.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
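# The lambda indirection matters for wrapping: an extension can replace,
# say, ``makefilestorage`` via ``extensions.wrapfunction(localrepo,
# 'makefilestorage', wrapper)`` and the wrapped version will still be
# picked up when the repository type is assembled.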


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository:
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    _basesupported = {
        requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
        requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
        requirementsmod.CHANGELOGV2_REQUIREMENT,
        requirementsmod.COPIESSDC_REQUIREMENT,
        requirementsmod.DIRSTATE_TRACKED_HINT_V1,
        requirementsmod.DIRSTATE_V2_REQUIREMENT,
        requirementsmod.DOTENCODE_REQUIREMENT,
        requirementsmod.FNCACHE_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
        requirementsmod.INTERNAL_PHASE_REQUIREMENT,
        requirementsmod.NODEMAP_REQUIREMENT,
        requirementsmod.RELATIVE_SHARED_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.REVLOGV2_REQUIREMENT,
        requirementsmod.SHARED_REQUIREMENT,
        requirementsmod.SHARESAFE_REQUIREMENT,
        requirementsmod.SPARSE_REQUIREMENT,
        requirementsmod.SPARSEREVLOG_REQUIREMENT,
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.TREEMANIFEST_REQUIREMENT,
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot: bytes,
        wdirvfs: vfsmod.vfs,
        hgvfs: vfsmod.vfs,
        requirements,
        supportedrequirements,
        sharedpath: bytes,
        store,
        cachevfs: vfsmod.vfs,
        wcachevfs: vfsmod.vfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.nodeconstants = sha1nodeconstants
        self.nullid = self.nodeconstants.nullid
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, 'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        self._dirstate = None
        # post-dirstate-status hooks
        self._postdsstatus = []

        self._pending_narrow_pats = None
        self._pending_narrow_pats_dirstate = None

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

        self._wanted_sidedata = set()
        self._sidedata_computers = {}
        sidedatamod.set_sidedata_spec_for_repo(self)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (
                b'journal.',
                b'undo.',
                b'strip-backup/',
                b'cache/',
            )
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    @property
    def vfs_map(self):
        return {
            b'': self.svfs,
            b'plain': self.vfs,
            b'store': self.svfs,
        }
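        # The keys name vfs "locations" as used by the transaction and
        # journal machinery: b'plain' resolves to the .hg/ vfs, while both
        # b'' and b'store' resolve to the store vfs.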

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        if self.ui.configbool(b'experimental', b'narrow'):
            caps.add(wireprototypes.NARROWCAP)
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
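        # Walk candidate prefixes from longest to shortest: for
        # b'sub/dir/f' the loop tries b'sub/dir/f', then b'sub/dir',
        # then b'sub', stopping at the first one that is a subrepo.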
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self, path=None, remotehidden=False):
        return localpeer(
            self, path=path, remotehidden=remotehidden
        )  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction updates changelog to content B
        # 3) outside transaction updates bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. This invalidation is
        # "light"; the `filecache` mechanism keeps the data in memory and will
        # reuse it if the underlying files did not change. Not parsing the
        # same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)
1757
1757
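    # A minimal sketch of the read-under-lock pattern described above
    # (illustrative only; `repo` stands for any local repository object, and
    # the exact sequence is an assumption, not code from this module):
    #
    #   marks = repo._bookmarks            # speculative read, no lock held
    #   with repo.wlock(), repo.lock():
    #       repo.invalidate()              # "light" invalidation after locking
    #       marks = repo._bookmarks        # filecache reuses or reloads as needed
    #
    # Because the changelog is explicitly refreshed in step (3), a changelog
    # that moved since the unlocked read is detected here and `_bookmarks` is
    # recomputed instead of being served stale.
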
    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depends on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

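    # A simplified model of what a `storecache`-style decorator provides (an
    # illustrative sketch only, not the implementation used by this module):
    # the computed value is cached together with a stat of every tracked
    # file, and recomputed only when a later stat differs. Error handling is
    # omitted and the `svfs.stat` call is an assumption.
    #
    #   class storecache_sketch:
    #       def __init__(self, *paths):
    #           self.paths = paths
    #       def __call__(self, func):
    #           self.func = func
    #           self.key = '_storecache_' + func.__name__
    #           return self
    #       def __get__(self, repo, owner=None):
    #           stats = tuple(repo.svfs.stat(p) for p in self.paths)
    #           cached = repo.__dict__.get(self.key)
    #           if cached is None or cached[0] != stats:
    #               cached = (stats, self.func(repo))
    #               repo.__dict__[self.key] = cached
    #           return cached[1]
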
    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @changelogcache()
    def changelog(repo):
        # load dirstate before changelog to avoid a race (see issue6303)
        repo.dirstate.prefetch_parents()
        return repo.store.changelog(
            txnutil.mayhavepending(repo.root),
            concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
        )

    @manifestlogcache()
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @unfilteredpropertycache
    def dirstate(self):
        if self._dirstate is None:
            self._dirstate = self._makedirstate()
        else:
            self._dirstate.refresh()
        return self._dirstate

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = None
        if sparse.use_sparse(self):
            sparsematchfn = lambda: sparse.matcher(self)
        v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
        th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
        use_dirstate_v2 = v2_req in self.requirements
        use_tracked_hint = th in self.requirements

        return dirstate.dirstate(
            self.vfs,
            self.ui,
            self.root,
            self._dirstatevalidate,
            sparsematchfn,
            self.nodeconstants,
            use_dirstate_v2,
            use_tracked_hint=use_tracked_hint,
        )

    def _dirstatevalidate(self, node):
        okay = True
        try:
            self.changelog.rev(node)
        except error.LookupError:
            # If the parents are unknown, it might just be because the
            # changelog in memory is lagging behind the dirstate in memory.
            # So try to refresh the changelog first.
            #
            # We only do so if we don't hold the lock; if we do hold the lock,
            # the invalidation at that time should have taken care of this and
            # something is very fishy.
            if self.currentlock() is None:
                self.invalidate()
                try:
                    self.changelog.rev(node)
                except error.LookupError:
                    okay = False
            else:
                # XXX we should consider raising an error here.
                okay = False
        if okay:
            return node
        else:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return self.nullid

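    # In practice this validation means a dirstate pointing at a node the
    # changelog does not know (e.g. after a strip) degrades gracefully: the
    # first access warns once, then behaves as if the working directory had
    # no parent. An illustrative sketch (the values are hypothetical):
    #
    #   bogus = b'\x01' * 20                          # node unknown to the changelog
    #   repo._dirstatevalidate(bogus)                 # warns, returns repo.nullid
    #   repo._dirstatevalidate(repo[b'tip'].node())   # known node, returned unchanged
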
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        # the narrow management should probably move into its own object
        val = self._pending_narrow_pats
        if val is None:
            val = narrowspec.load(self)
        return val

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

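    # Usage sketch (illustrative; `usermatch` stands for any matcher built by
    # a caller, it is not a name defined in this module):
    #
    #   m = repo.narrowmatch()                      # plain narrowspec matcher
    #   m = repo.narrowmatch(usermatch)             # intersected with a user match
    #   m = repo.narrowmatch(usermatch, includeexact=True)
    #   #   exact user-specified paths survive even outside the narrowspec,
    #   #   so callers can warn about them instead of silently dropping them
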
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, self.nodeconstants.nullid),
            nullrev: (nullrev, self.nodeconstants.nullid),
            self.nullid: (nullrev, self.nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast-path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = self._quick_access_changeid_null.copy()
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == self.nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != self.nullid:
            quick[b'.'] = quick[p1node]
        return quick

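    # The resulting table maps every cheap alias to the same (rev, node)
    # pair, e.g. (revision numbers and names below are hypothetical):
    #
    #   {
    #       b'null': (-1, nullid), -1: (-1, nullid), nullid: (-1, nullid),
    #       42: (42, p1node), p1node: (42, p1node), b'.': (42, p1node),
    #       41: (41, gpnode), gpnode: (41, gpnode),   # grandparents too
    #   }
    #
    # so lookups like `repo[b'.']` can skip revset processing entirely.
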
    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains the symbols we can recognise right away without
        further processing.
        """
        if self.filtername in repoview.filter_has_wc:
            return self._quick_access_changeid_wc
        return self._quick_access_changeid_null

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in range(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == self.nodeconstants.nodelen:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 2 * self.nodeconstants.nodelen:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

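    # Dispatch sketch for the lookups above (inputs are illustrative):
    #
    #   repo[None]       -> workingctx (the working directory)
    #   repo[0]          -> changectx for revision 0
    #   repo[b'.']       -> changectx for the first working-copy parent
    #   repo[0:3]        -> [repo[0], repo[1], repo[2]], filtered revs skipped
    #   repo[b'deadbeef' * 5]   # 40-byte hex string -> bin() + rev lookup
    #   repo[b'\x00' * 20]      # 20-byte binary node -> direct rev lookup
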
    def __contains__(self, changeid):
        """True if the given changeid exists"""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr: bytes, *args):
        """Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        """
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

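    # Usage sketch (the %-codes follow ``revsetlang.formatspec``; the revset
    # expressions themselves are only examples):
    #
    #   repo.revs(b'heads(%d::)', 10)           # %d escapes an int revision
    #   repo.revs(b'ancestors(%n)', somenode)   # %n escapes a binary node
    #   for rev in repo.revs(b'draft()'):
    #       ...                                 # iterate integer revisions
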
    def set(self, expr: bytes, *args):
        """Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        """
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs: bytes, user=False, localalias=None):
        """Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        """
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

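    # Usage sketch (the alias name and definition are hypothetical):
    #
    #   revs = repo.anyrevs([b'release'], user=True,
    #                       localalias={b'release': b'tag() and public()'})
    #
    # The two fast paths above short-circuit the common [b'null'] and [b'.']
    # cases without building a revset matcher at all.
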
    def url(self) -> bytes:
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

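    # Usage sketch (the hook name and argument are hypothetical):
    #
    #   repo.hook(b'pretxnexample', throw=True, txnname=b'push')
    #
    # With ``throw=True`` a failing hook aborts by raising instead of merely
    # reporting the failure through the return value.
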
    @filteredpropertycache
    def _tagscache(self):
        """Returns a tagscache object that contains various tag-related
        caches."""

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache:
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.items():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        """Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object."""

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for name, (node, hist) in alltags.items():
            if node != self.nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value for (name, value) in tagtypes.items()
        }
        return (tags, tagtypes)

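    # Shape of the returned pair (node values below are hypothetical):
    #
    #   tags     = {b'tip': tipnode, b'v1.0': node1, b'wip': node2}
    #   tagtypes = {b'v1.0': b'global', b'wip': b'local'}
    #
    # b'tip' is synthesized here rather than read from .hgtags, which is why
    # it has no entry in tagtypes.
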
    def tagtype(self, tagname):
        """
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        """

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().items():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.items():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.values():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        """returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number"""
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def register_changeset(self, rev, changelogrevision):
        self.revbranchcache().setdata(rev, changelogrevision)

    def branchtip(self, branch, ignoremissing=False):
        """return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        """
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

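    # Membership sketch: ``known`` answers, per input node, whether the node
    # exists *and* is visible under the current filter (values illustrative):
    #
    #   repo.known([tipnode, unknownnode, hiddennode])
    #   # -> [True, False, False]   (hiddennode exists but is filtered out)
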
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=None):
        if p2 is None:
            p2 = self.nullid
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self) -> bytes:
        return self.dirstate.getcwd()

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.items():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

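    # The patterns come from hgrc sections named after ``filter`` (b'encode'
    # or b'decode'). A sketch of a matching configuration (the commands shown
    # are placeholders, not defaults):
    #
    #   [encode]
    #   **.txt = some-unix2dos-like-command
    #   [decode]
    #   **.txt = some-dos2unix-like-command
    #
    # A ``!`` command disables filtering for that pattern; a command starting
    # with a registered data-filter name (see ``adddatafilter``) is dispatched
    # to that Python filter instead of an external shell command.
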
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename: bytes) -> bytes:
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(
        self,
        filename: bytes,
        data: bytes,
        flags: bytes,
        backgroundclose=False,
        **kwargs
    ) -> int:
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

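    # Flags sketch: ``flags`` is a bytestring where b'l' marks a symlink and
    # b'x' an executable file (the calls below are illustrative):
    #
    #   repo.wwrite(b'bin/run.sh', data, b'x')   # regular file, exec bit set
    #   repo.wwrite(b'link', b'target', b'l')    # symlink pointing at 'target'
    #   repo.wwrite(b'README', data, b'')        # plain regular file
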
    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

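    # Caller-side sketch of the transaction lifecycle (the transaction name
    # b'example' is hypothetical):
    #
    #   with repo.lock():
    #       tr = repo.transaction(b'example')
    #       try:
    #           ...           # mutate the store
    #           tr.close()    # commit the transaction
    #       finally:
    #           tr.release()  # rolls back if close() was never reached
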
    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        # At that point your dirstate should be clean:
        #
        # - If you don't have the wlock, why would you still have a dirty
        #   dirstate?
        #
        # - If you hold the wlock, you should not be opening a transaction in
        #   the middle of a `dirstate.changing_*` block. The transaction needs
        #   to be open before that and wrap the change-context.
        #
        # - If you are not within a `dirstate.changing_*` context, why is our
        #   dirstate dirty?
        if self.dirstate._dirty:
            m = "cannot open a transaction with a dirty dirstate"
            raise error.ProgrammingError(m)

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = self.vfs_map
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tag was touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                assert repo is not None  # help pytype
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
2530
2530
2531 def validate(tr2):
2531 def validate(tr2):
2532 """will run pre-closing hooks"""
2532 """will run pre-closing hooks"""
2533 # XXX the transaction API is a bit lacking here so we take a hacky
2533 # XXX the transaction API is a bit lacking here so we take a hacky
2534 # path for now
2534 # path for now
2535 #
2535 #
2536 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2536 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
2537 # dict is copied before these run. In addition we needs the data
2537 # dict is copied before these run. In addition we needs the data
2538 # available to in memory hooks too.
2538 # available to in memory hooks too.
2539 #
2539 #
2540 # Moreover, we also need to make sure this runs before txnclose
2540 # Moreover, we also need to make sure this runs before txnclose
2541 # hooks and there is no "pending" mechanism that would execute
2541 # hooks and there is no "pending" mechanism that would execute
2542 # logic only if hooks are about to run.
2542 # logic only if hooks are about to run.
2543 #
2543 #
2544 # Fixing this limitation of the transaction is also needed to track
2544 # Fixing this limitation of the transaction is also needed to track
2545 # other families of changes (bookmarks, phases, obsolescence).
2545 # other families of changes (bookmarks, phases, obsolescence).
2546 #
2546 #
2547 # This will have to be fixed before we remove the experimental
2547 # This will have to be fixed before we remove the experimental
2548 # gating.
2548 # gating.
2549 tracktags(tr2)
2549 tracktags(tr2)
2550 repo = reporef()
2550 repo = reporef()
2551 assert repo is not None # help pytype
2551 assert repo is not None # help pytype
2552
2552
2553 singleheadopt = (b'experimental', b'single-head-per-branch')
2553 singleheadopt = (b'experimental', b'single-head-per-branch')
2554 singlehead = repo.ui.configbool(*singleheadopt)
2554 singlehead = repo.ui.configbool(*singleheadopt)
2555 if singlehead:
2555 if singlehead:
2556 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2556 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2557 accountclosed = singleheadsub.get(
2557 accountclosed = singleheadsub.get(
2558 b"account-closed-heads", False
2558 b"account-closed-heads", False
2559 )
2559 )
2560 if singleheadsub.get(b"public-changes-only", False):
2560 if singleheadsub.get(b"public-changes-only", False):
2561 filtername = b"immutable"
2561 filtername = b"immutable"
2562 else:
2562 else:
2563 filtername = b"visible"
2563 filtername = b"visible"
2564 scmutil.enforcesinglehead(
2564 scmutil.enforcesinglehead(
2565 repo, tr2, desc, accountclosed, filtername
2565 repo, tr2, desc, accountclosed, filtername
2566 )
2566 )
2567 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2567 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2568 for name, (old, new) in sorted(
2568 for name, (old, new) in sorted(
2569 tr.changes[b'bookmarks'].items()
2569 tr.changes[b'bookmarks'].items()
2570 ):
2570 ):
2571 args = tr.hookargs.copy()
2571 args = tr.hookargs.copy()
2572 args.update(bookmarks.preparehookargs(name, old, new))
2572 args.update(bookmarks.preparehookargs(name, old, new))
2573 repo.hook(
2573 repo.hook(
2574 b'pretxnclose-bookmark',
2574 b'pretxnclose-bookmark',
2575 throw=True,
2575 throw=True,
2576 **pycompat.strkwargs(args)
2576 **pycompat.strkwargs(args)
2577 )
2577 )
2578 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2578 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2579 cl = repo.unfiltered().changelog
2579 cl = repo.unfiltered().changelog
2580 for revs, (old, new) in tr.changes[b'phases']:
2580 for revs, (old, new) in tr.changes[b'phases']:
2581 for rev in revs:
2581 for rev in revs:
2582 args = tr.hookargs.copy()
2582 args = tr.hookargs.copy()
2583 node = hex(cl.node(rev))
2583 node = hex(cl.node(rev))
2584 args.update(phases.preparehookargs(node, old, new))
2584 args.update(phases.preparehookargs(node, old, new))
2585 repo.hook(
2585 repo.hook(
2586 b'pretxnclose-phase',
2586 b'pretxnclose-phase',
2587 throw=True,
2587 throw=True,
2588 **pycompat.strkwargs(args)
2588 **pycompat.strkwargs(args)
2589 )
2589 )
2590
2590
2591 repo.hook(
2591 repo.hook(
2592 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2592 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2593 )
2593 )
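
            # For reference, the pretxnclose-* hooks fired above are the ones
            # users wire up in configuration; an illustrative (hypothetical)
            # hgrc entry:
            #
            #   [hooks]
            #   pretxnclose-bookmark.audit = python:myhooks.checkbookmark
            #
            # where myhooks.checkbookmark is a user-supplied callable that can
            # veto the transaction by raising an error.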

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # This should be invoked here explicitly, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            lambda: None,
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        for vfs_id, path in self._journalfiles():
            tr.add_journal(vfs_id, path)
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run"""
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                assert repo is not None  # help pytype

                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(
                                phases.preparehookargs(node, old, new)
                            )
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            repo = reporef()
            assert repo is not None  # help pytype
            repo._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted"""
            repo = reporef()
            assert repo is not None  # help pytype
            repo.hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        # This only exists to deal with rollback's need to have viable parents
        # at the end of the operation, so back up viable parents at the time
        # of this operation.
        #
        # We only do it when the `wlock` is taken, otherwise others might be
        # altering the dirstate under us.
        #
        # This is really not a great way to do this (first, because we cannot
        # always do it). More viable alternatives exist:
        #
        # - backing up only the working copy parents in a dedicated file and
        #   doing a clean "keep-update" to them on `hg rollback`.
        #
        # - slightly changing the behavior and applying logic similar to "hg
        #   strip" to pick a working copy destination on `hg rollback`
        if self.currentwlock() is not None:
            ds = self.dirstate
            if not self.vfs.exists(b'branch'):
                # force a file to be written if none exists
                ds.setbranch(b'default', None)

            def backup_dirstate(tr):
                for f in ds.all_file_names():
                    # hardlink backup is okay because `dirstate` is always
                    # atomically written and possible data files are
                    # append-only and resistant to trailing data.
                    tr.addbackup(f, hardlink=True, location=b'plain')

            tr.addvalidator(b'dirstate-backup', backup_dirstate)
        return tr
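
    # Typical call pattern, matching how commit() uses it further down (a
    # sketch, not the only valid shape):
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # store mutations; rolled back if an exception escapes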

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.vfs, b'journal.desc'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
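
    # After a clean close, each journal file gets an undo counterpart (e.g.
    # b'journal' -> b'undo', the names passed to transaction.transaction
    # above), which is what rollback and recover consume.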

    @unfilteredmethod
    def _writejournal(self, desc):
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = self.vfs_map
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False
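
    # This is what backs `hg recover`: a True return means an interrupted
    # journal was found and rolled back, False that there was nothing to do.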

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui

        parents = self.dirstate.parents()
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
            parentgone = any(self[p].rev() > oldtip for p in parents)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None
            parentgone = True

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        self.destroying()
        vfsmap = self.vfs_map
        skip_journal_pattern = None
        if not parentgone:
            skip_journal_pattern = RE_SKIP_DIRSTATE_ROLLBACK
        transaction.rollback(
            self.svfs,
            vfsmap,
            b'undo',
            ui.warn,
            checkambigfiles=_cachedfiles,
            skip_journal_pattern=skip_journal_pattern,
        )
        self.invalidate()
        self.dirstate.invalidate()

        if parentgone:
            # replace this with some explicit parent update in the future.
            has_node = self.changelog.index.has_node
            if not all(has_node(p) for p in self.dirstate._pl):
                # There was no dirstate to back up initially; we need to drop
                # the existing one.
                with self.dirstate.changing_parents(self):
                    self.dirstate.setparents(self.nullid)
                    self.dirstate.clear()

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n')
                    % parents
                )
            mergestatemod.mergestate.clean(self)

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
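
    # For reference, the undo.desc file parsed above (written as journal.desc
    # by _writejournal) has a simple line-oriented format, e.g.:
    #
    #     42
    #     commit
    #
    # i.e. the repository length before the transaction, the transaction name,
    # and an optional third detail line.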

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            assert repo is not None  # help pytype
            repo.updatecaches(tr)

        return updater
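
    # Extensions that want additional warming can override this in their repo
    # subclass (a hypothetical sketch, chaining to the stock updater):
    #
    #     def _buildcacheupdater(self, newtransaction):
    #         inner = super()._buildcacheupdater(newtransaction)
    #
    #         def updater(tr):
    #             inner(tr)
    #             ...  # warm extension-specific caches
    #
    #         return updater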

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False, caches=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.

        The `full` argument can take a special "post-clone" value. In this case
        the cache warming is done after a clone, and some of the slower caches
        may be skipped, namely the `.fnodetags` one. This argument is 5.8
        specific as we plan for a cleaner way to deal with this for 5.9.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        unfi = self.unfiltered()

        if full:
            msg = (
                "`full` argument for `repo.updatecaches` is deprecated\n"
                "(use `caches=repository.CACHE_ALL` instead)"
            )
            self.ui.deprecwarn(msg, b"5.9")
            if full == b"post-clone":
                caches = repository.CACHES_POST_CLONE
            else:
                caches = repository.CACHES_ALL
        elif caches is None:
            caches = repository.CACHES_DEFAULT

        if repository.CACHE_BRANCHMAP_SERVED in caches:
            if tr is None or tr.changes[b'origrepolen'] < len(self):
                # accessing the 'served' branchmap should refresh all the
                # others,
                self.ui.debug(b'updating the branch cache\n')
                self.filtered(b'served').branchmap()
                self.filtered(b'served.hidden').branchmap()
                # flush all possibly delayed write.
                self._branchcaches.write_delayed(self)

        if repository.CACHE_CHANGELOG_CACHE in caches:
            self.changelog.update_caches(transaction=tr)

        if repository.CACHE_MANIFESTLOG_CACHE in caches:
            self.manifestlog.update_caches(transaction=tr)
            for entry in self.store.walk():
                if not entry.is_revlog:
                    continue
                if not entry.is_manifestlog:
                    continue
                manifestrevlog = entry.get_revlog_instance(self).get_revlog()
                if manifestrevlog is not None:
                    manifestrevlog.update_caches(transaction=tr)

        if repository.CACHE_REV_BRANCH in caches:
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

        if repository.CACHE_FULL_MANIFEST in caches:
            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

        if repository.CACHE_FILE_NODE_TAGS in caches:
            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())

        if repository.CACHE_TAGS_DEFAULT in caches:
            # accessing tags warms the cache
            self.tags()
        if repository.CACHE_TAGS_SERVED in caches:
            self.filtered(b'served').tags()

        if repository.CACHE_BRANCHMAP_ALL in caches:
            # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
            # so we're forcing a write to cause these caches to be warmed up
            # even if they haven't explicitly been requested yet (if they've
            # never been used by hg, they won't ever have been written, even if
            # they're a subset of another kind of cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
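
    # Explicit warming outside of any transaction is simply (illustrative):
    #
    #     repo.updatecaches(caches=repository.CACHES_ALL)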

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        """Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. restoring it to a previous
        known good state)."""
        unfi = self.unfiltered()
        if 'dirstate' in unfi.__dict__:
            assert not self.dirstate.is_changing_any
            del unfi.__dict__['dirstate']

    def invalidate(self, clearfilecache=False):
        """Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        """
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # XXX ideally, the key would be a unicode string to match the
                # fact that it refers to an attribute name. However, changing
                # this was a bit of scope creep compared to the series cleaning
                # up del/set/getattr, so we kept things simple here.
                delattr(unfiltered, pycompat.sysstr(k))
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
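
    # Note on the delattr() above: filecache keys are bytes while instance
    # attributes live under str names, hence the conversion; e.g.
    # pycompat.sysstr(b'changelog') == 'changelog'.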

    def invalidateall(self):
        """Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes."""
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
    ):
        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
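
    # Callback sketch, mirroring how commithook() is registered further down
    # (the boolean reports whether the lock cycle completed successfully):
    #
    #     def on_unlocked(success):
    #         ...
    #     repo._afterlock(on_unlocked)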

    def lock(self, wait=True):
        """Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        """Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard."""
        l = self._wlockref() if self._wlockref else None
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.is_changing_any:
                msg = b"wlock release in the middle of a changing parents"
                self.ui.develwarn(msg)
                self.dirstate.invalidate()
            else:
                if self.dirstate._dirty:
                    msg = b"dirty dirstate on wlock release"
                    self.ui.develwarn(msg)
                    self.dirstate.write(None)

            unfi = self.unfiltered()
            if 'dirstate' in unfi.__dict__:
                del unfi.__dict__['dirstate']

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
        )
        self._wlockref = weakref.ref(l)
        return l
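
    # The documented acquisition order, as commit() below uses it (wlock
    # strictly before lock):
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # both .hg and .hg/store may be modified here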

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def currentlock(self):
        """Returns the lock if it's held, or None if it's not."""
        return self._currentlock(self._lockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msg_path = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msg_path
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b"%s --edit' to reuse it\n"
                        )
                        % msg_path
                    )
                raise

            def commithook(unused_success):
                # hack for commands that use a temporary commit (e.g. histedit)
                # the temporary commit may have been stripped before the hook
                # runs
                if self.changelog.hasnode(ret):
                    self.hook(
                        b"commit",
                        node=hex(ret),
                        parent1=hookp1,
                        parent2=hookp2,
                    )

            self._afterlock(commithook)
            return ret
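
    # Minimal programmatic use (sketch; the command layer normally drives
    # this):
    #
    #     node = repo.commit(text=b"fix parser bug", user=b"a@example.com")
    #     # 'node' is the new changeset node, or None if nothing was committed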

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        """Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        """
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        """Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        """
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )
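
    # Quick sketch: with the defaults this compares b'.' against the working
    # directory, e.g.:
    #
    #     st = repo.status()
    #     st.modified, st.added, st.removed  # lists of filenames (bytes)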

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
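
    # Callback sketch matching the contract above:
    #
    #     def fixup(wctx, status):
    #         ...  # use wctx.repo().dirstate, never a cached dirstate
    #     repo.addpostdsstatus(fixup)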
3434
3438
3435 def postdsstatus(self):
3439 def postdsstatus(self):
3436 """Used by workingctx to get the list of post-dirstate-status hooks."""
3440 """Used by workingctx to get the list of post-dirstate-status hooks."""
3437 return self._postdsstatus
3441 return self._postdsstatus
3438
3442
3439 def clearpostdsstatus(self):
3443 def clearpostdsstatus(self):
3440 """Used by workingctx to clear post-dirstate-status hooks."""
3444 """Used by workingctx to clear post-dirstate-status hooks."""
3441 del self._postdsstatus[:]
3445 del self._postdsstatus[:]
3442
3446
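    # Registration sketch for the hooks above (illustrative only; the
    # 'fixup' callback is made up):
    #
    #   def fixup(wctx, status):
    #       wctx.repo().ui.note(b'%d files modified\n' % len(status.modified))
    #
    #   repo.addpostdsstatus(fixup)
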
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        """return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        """
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != self.nullid or p[0] == self.nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != self.nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

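    # Worked trace (illustrative only): for a linear history
    # a <- b <- c <- d <- e <- f, between([(f, a)]) walks first parents
    # from f and records the nodes reached after 1, 2, 4, ... steps,
    # i.e. it returns [[e, d, b]] -- the exponentially spaced sample the
    # legacy discovery protocol expects.
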
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

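    # Example invocation (illustrative only; 'old' and 'new' would be hex
    # nodes): repo.pushkey(b'bookmarks', b'@', old, new) moves the '@'
    # bookmark, firing the prepushkey/pushkey hooks around the write.
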
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])

    def register_wanted_sidedata(self, category):
        if repository.REPO_FEATURE_SIDE_DATA not in self.features:
            # Only revlogv2 repos can want sidedata.
            return
        self._wanted_sidedata.add(pycompat.bytestr(category))

    def register_sidedata_computer(
        self, kind, category, keys, computer, flags, replace=False
    ):
        if kind not in revlogconst.ALL_KINDS:
            msg = _(b"unexpected revlog kind '%s'.")
            raise error.ProgrammingError(msg % kind)
        category = pycompat.bytestr(category)
        already_registered = category in self._sidedata_computers.get(kind, [])
        if already_registered and not replace:
            msg = _(
                b"cannot register a sidedata computer twice for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        if replace and not already_registered:
            msg = _(
                b"cannot replace a sidedata computer that isn't registered "
                b"for category '%s'."
            )
            raise error.ProgrammingError(msg % category)
        self._sidedata_computers.setdefault(kind, {})
        self._sidedata_computers[kind][category] = (keys, computer, flags)


def undoname(fn: bytes) -> bytes:
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


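# Sanity sketch of the mapping above (illustrative only):
#
#   undoname(b'.hg/journal')          -> b'.hg/undo'
#   undoname(b'.hg/journal.dirstate') -> b'.hg/undo.dirstate'
#
# Only the first b'journal' in the basename is rewritten.

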
def instance(ui, path: bytes, create, intents=None, createopts=None):
    # prevent cyclic import localrepo -> upgrade -> localrepo
    from . import upgrade

    localpath = urlutil.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    def repo_maker():
        return makelocalrepository(ui, localpath, intents=intents)

    repo = repo_maker()
    repo = upgrade.may_auto_upgrade(repo, repo_maker)
    return repo


def islocal(path: bytes) -> bool:
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def clone_requirements(ui, createopts, srcrepo):
    """clone the requirements of a local repo for a local clone

    The store requirements are unchanged while the working copy requirements
    depend on the configuration
    """
    target_requirements = set()
    if not srcrepo.requirements:
        # this is a legacy revlog "v0" repository, we cannot do anything fancy
        # with it.
        return target_requirements
    createopts = defaultcreateopts(ui, createopts=createopts)
    for r in newreporequirements(ui, createopts):
        if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)

    for r in srcrepo.requirements:
        if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
            target_requirements.add(r)
    return target_requirements


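# Sketch of the resulting split (hypothetical requirement names): if the
# source repo requires {b'store', b'generaldelta', b'dirstate-v2'} while
# the local configuration would create new repos without dirstate-v2, the
# clone keeps the store-side requirements {b'store', b'generaldelta'} from
# the source but takes the working-copy requirements from the local
# configuration -- so here the clone would drop b'dirstate-v2'.

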
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(requirementsmod.STORE_REQUIREMENT)
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            engine = util.compengines[compengine]
            if engine.available() and engine.revlogheader():
                break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.use-dirstate-v2
    # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
    if ui.configbool(b'format', b'use-dirstate-v2'):
        requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)

    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
        requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)

    changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
    if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
        requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'use-internal-phase'):
        requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)

    # experimental config: format.exp-archived-phase
    if ui.configbool(b'format', b'exp-archived-phase'):
        requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)

    if createopts.get(b'narrowfiles'):
        requirements.add(requirementsmod.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

    # The feature is disabled unless a fast implementation is available.
    persistent_nodemap_default = policy.importrust('revlog') is not None
    if ui.configbool(
        b'format', b'use-persistent-nodemap', persistent_nodemap_default
    ):
        requirements.add(requirementsmod.NODEMAP_REQUIREMENT)

    # if share-safe is enabled, let's create the new repository with the new
    # requirement
    if ui.configbool(b'format', b'use-share-safe'):
        requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)

    # if we are creating a share-repo¹ we have to handle requirement
    # differently.
    #
    # [1] (i.e. reusing the store from another repository, just having a
    # working copy)
    if b'sharedrepo' in createopts:
        source_requirements = set(createopts[b'sharedrepo'].requirements)

        if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
            # share to an old school repository, we have to copy the
            # requirements and hope for the best.
            requirements = source_requirements
        else:
            # We have control on the working copy only, so "copy" the non
            # working copy part over, ignoring previous logic.
            to_drop = set()
            for req in requirements:
                if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
                    continue
                if req in source_requirements:
                    continue
                to_drop.add(req)
            requirements -= to_drop
            requirements |= source_requirements

        if createopts.get(b'sharedrelative'):
            requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
        else:
            requirements.add(requirementsmod.SHARED_REQUIREMENT)

    if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
        version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
        msg = _(b"ignoring unknown tracked key version: %d\n")
        hint = _(
            b"see `hg help config.format.use-dirstate-tracked-hint-version"
        )
        if version != 1:
            ui.warn(msg % version, hint=hint)
        else:
            requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)

    return requirements


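# For orientation, a hypothetical configuration and a rough sketch of the
# requirements the logic above would derive from it (assuming the zstd
# engine is available and the usual defaults for store/fncache/dotencode):
#
#   [format]
#   revlog-compression = zstd
#   use-share-safe = yes
#
#   -> {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#       b'revlog-compression-zstd', b'sparserevlog', b'share-safe', ...}

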
def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it"""

    dropped = set()

    if requirementsmod.STORE_REQUIREMENT not in requirements:
        if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)

        if (
            requirementsmod.SHARED_REQUIREMENT in requirements
            or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
        ):
            raise error.Abort(
                _(
                    b"cannot create shared repository as source was created"
                    b" with 'format.usestore' config disabled"
                )
            )

        if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
            if ui.hasconfig(b'format', b'use-share-safe'):
                msg = _(
                    b"ignoring enabled 'format.use-share-safe' config because "
                    b"it is incompatible with disabled 'format.usestore'"
                    b" config\n"
                )
                ui.warn(msg)
            dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)

    return dropped


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


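# How an extension might claim its own creation option (illustrative only;
# b'myopt' is made up):
#
#   def _filter(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myopt', None)
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filter)

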
def createrepository(ui, path: bytes, createopts=None, requirements=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    ``requirements`` predefined set of requirements.
        (incompatible with ``createopts``)

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """

    if requirements is not None:
        if createopts is not None:
            msg = b'cannot specify both createopts and requirements'
            raise error.ProgrammingError(msg)
        createopts = {}
    else:
        createopts = defaultcreateopts(ui, createopts=createopts)

        unknownopts = filterknowncreateopts(ui, createopts)

        if not isinstance(unknownopts, dict):
            raise error.ProgrammingError(
                b'filterknowncreateopts() did not return a dict'
            )

        if unknownopts:
            raise error.Abort(
                _(
                    b'unable to create repository because of unknown '
                    b'creation option: %s'
                )
                % b', '.join(sorted(unknownopts)),
                hint=_(b'is a required extension not loaded?'),
            )

        requirements = newreporequirements(ui, createopts=createopts)
        requirements -= checkrequirementscompat(ui, requirements)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
                sharedpath = util.pconvert(sharedpath)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    has_store = requirementsmod.STORE_REQUIREMENT in requirements
    if has_store and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 65535, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
            b'layout',
        )

    # Filter the requirements into working copy and store ones
    wcreq, storereq = scmutil.filterrequirements(requirements)
    # write working copy ones
    scmutil.writerequires(hgvfs, wcreq)
    # If there are store requirements and the current repository
    # is not a shared one, write stored requirements
    # For a new shared repository, we don't need to write the store
    # requirements as they are already present in store requires
    if storereq and b'sharedrepo' not in createopts:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
        scmutil.writerequires(storevfs, storereq)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


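# Minimal creation sketch (illustrative only; the path is made up):
#
#   createrepository(ui, b'/tmp/newrepo')
#   repo = instance(ui, b'/tmp/newrepo', create=False)

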
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository:
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
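
# After poisoning, only close() keeps working (illustrative only):
#
#   poisonrepository(repo)
#   repo.close()        # still allowed
#   repo.changelog      # raises error.ProgrammingError
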
@@ -1,2325 +1,2329 b''
# scmutil.py - Mercurial core utility functions
#
# Copyright Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import binascii
import errno
import glob
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    short,
    wdirrev,
)
from .pycompat import getattr
from .thirdparty import attr
from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    requirements as requirementsmod,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod('parsers')
rustrevlog = policy.importrust('revlog')

termsize = scmplatform.termsize


@attr.s(slots=True, repr=False)
class status:
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)


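# Construction and unpacking sketch (illustrative values): __iter__ above
# yields the seven lists in declaration order.
#
#   st = status(modified=[b'a.txt'], added=[b'b.txt'])
#   m, a, r, d, u, i, c = st
#   assert m == [b'a.txt'] and not r

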
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)


def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    """
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n")
            % len(secretlist)
        )
    else:
        ui.status(_(b"no changes found\n"))


def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        if isinstance(inst, error.RepoLookupError):
            detailed_exit_code = 10
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, str):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        if (
            util.safehasattr(inst, "args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code


269 def checknewlabel(repo, lbl, kind):
269 def checknewlabel(repo, lbl, kind):
270 # Do not use the "kind" parameter in ui output.
270 # Do not use the "kind" parameter in ui output.
271 # It makes strings difficult to translate.
271 # It makes strings difficult to translate.
272 if lbl in [b'tip', b'.', b'null']:
272 if lbl in [b'tip', b'.', b'null']:
273 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
273 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
274 for c in (b':', b'\0', b'\n', b'\r'):
274 for c in (b':', b'\0', b'\n', b'\r'):
275 if c in lbl:
275 if c in lbl:
276 raise error.InputError(
276 raise error.InputError(
277 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
277 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
278 )
278 )
279 try:
279 try:
280 int(lbl)
280 int(lbl)
281 if b'_' in lbl:
281 if b'_' in lbl:
282 # If label contains underscores, Python might consider it an
282 # If label contains underscores, Python might consider it an
283 # integer (with "_" as visual separators), but we do not.
283 # integer (with "_" as visual separators), but we do not.
284 # See PEP 515 - Underscores in Numeric Literals.
284 # See PEP 515 - Underscores in Numeric Literals.
285 raise ValueError
285 raise ValueError
286 raise error.InputError(_(b"cannot use an integer as a name"))
286 raise error.InputError(_(b"cannot use an integer as a name"))
287 except ValueError:
287 except ValueError:
288 pass
288 pass
289 if lbl.strip() != lbl:
289 if lbl.strip() != lbl:
290 raise error.InputError(
290 raise error.InputError(
291 _(b"leading or trailing whitespace in name %r") % lbl
291 _(b"leading or trailing whitespace in name %r") % lbl
292 )
292 )
293
293
294
294
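# Illustrative sketch (not part of the module; assumes any live repo object):
# checknewlabel() rejects reserved names, forbidden characters, and names
# that parse as integers, e.g.:
#
#   checknewlabel(repo, b'tip', b'bookmark')   # InputError: name is reserved
#   checknewlabel(repo, b'a:b', b'branch')     # InputError: ':' forbidden
#   checknewlabel(repo, b'1234', b'tag')       # InputError: integer name
#   checknewlabel(repo, b'12_34', b'tag')      # accepted: underscore numerals
#                                              # are not treated as integers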
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if b'\r' in f or b'\n' in f:
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )


def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = b"%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.InputError(msg)
            ui.warn(_(b"warning: %s\n") % msg)


def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    val = ui.config(b'ui', b'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == b'abort'
    warn = bval or lval == b'warn'
    if bval is None and not (warn or abort or lval == b'ignore'):
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % val
        )
    return abort, warn


class casecollisionauditor:
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.StateError(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)


def filteredhash(repo, maxrev, needobsolete=False):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (and, optionally,
    all obsolete revs) up to maxrev and returns that SHA-1 digest.
    """
    cl = repo.changelog
    if needobsolete:
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        if not cl.filteredrevs and not obsrevs:
            return None
        key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
    else:
        if not cl.filteredrevs:
            return None
        key = maxrev
        obsrevs = frozenset()

    result = cl._filteredrevs_hashcache.get(key)
    if not result:
        revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
        if revs:
            s = hashutil.sha1()
            for rev in revs:
                s.update(b'%d;' % rev)
            result = s.digest()
            cl._filteredrevs_hashcache[key] = result
    return result
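# Illustrative sketch (not part of the module): the digest is just SHA-1 over
# the b'%d;'-joined filtered revs, so for a view hiding revs 2 and 5 with
# maxrev >= 5 the returned value would equal:
#
#   hashutil.sha1(b'2;5;').digest()
#
# and filteredhash() returns None when the view filters nothing.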
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def errhandler(err):
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:

        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match

    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove(b'.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs


def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return ctx.repo().nodeconstants.wdirid
    return node


def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev


def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))


def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return b'%d:%s' % (rev, hexfunc(node))
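# Illustrative sketch (not part of the module): for rev 5 and a hypothetical
# node whose full hash starts with f2a9e1d8c3b4, formatrevnode() yields:
#
#   formatrevnode(ui, 5, node)        # -> b'5:f2a9e1d8c3b4' (short, 12 hex)
#   formatrevnode(debug_ui, 5, node)  # -> b'5:<full 40-hex node>'
#
# where debug_ui is any ui object with debugflag set.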
def resolvehexnodeidprefix(repo, prefix):
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node


def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False
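# Illustrative sketch (not part of the module): in a repo whose tip is rev 42,
#
#   mayberevnum(repo, b'7')     # True  - a valid revision number
#   mayberevnum(repo, b'0')     # True  - rev 0 exists and must disambiguate
#   mayberevnum(repo, b'012')   # False - leading zero is never a revnum
#   mayberevnum(repo, b'99')    # False - beyond the tip rev
#   mayberevnum(repo, b'abc')   # False - not an integer at all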
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapper needs to give access to its
                        # internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()


def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False


def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (binascii.Error, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        raise _filterederror(repo, symbol)
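# Illustrative sketch (not part of the module): per the docstring, revsymbol()
# resolves plain symbols only, never revset expressions:
#
#   revsymbol(repo, b'.')             # working directory parent context
#   revsymbol(repo, b'42')            # context for rev 42
#   revsymbol(repo, b'deadbeef12')    # unambiguous hex nodeid prefix
#   revsymbol(repo, b'my-bookmark')   # resolved via the names interface
#   revsymbol(repo, b'max(public())') # RepoLookupError: not a plain symbol
#   revsymbol(repo, 'tip')            # ProgrammingError: str, not bytes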
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)


def revsingle(repo, revspec, default=b'.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.InputError(_(b'empty revision set'))
    return repo[l.last()]


def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in (
        b'range',
        b'rangepre',
        b'rangepost',
        b'rangeall',
    )


def revpair(repo, revs):
    if not revs:
        return repo[b'.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.InputError(_(b'empty revision range'))

    first = l.first()
    second = l.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]


def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec(b'%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
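# Illustrative sketch (not part of the module): mixed specs are OR-ed into a
# single smartset, with bare ints treated as revision numbers:
#
#   revs = revrange(repo, [b'::.', 42, b'draft()'])
#   # roughly equivalent to evaluating the revset:  (::.) or 42 or draft()
#   for rev in revs:
#       ...  # integer revisions via the list-like smartset interface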
def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2
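# Illustrative sketch (not part of the module): the generator doubles the
# window until it reaches the limit, then repeats the limit forever:
#
#   import itertools
#   list(itertools.islice(increasingwindows(), 9))
#   # -> [8, 16, 32, 64, 128, 256, 512, 512, 512]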
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in range(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
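# Illustrative sketch (not part of the module): a caller that needs no
# per-file matching can pass trivial callbacks and still get the windowed
# iteration behaviour:
#
#   def prepare(ctx, fmatch):
#       pass  # gather per-window data here; called in forward order
#
#   for ctx in walkchangerevs(repo, revs, lambda ctx: None, prepare):
#       ...  # contexts are yielded in the order of `revs` itself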
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents


def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    else:
        return util.localpath


def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))


def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude, were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    return bool(pats or opts.get(b'include') or opts.get(b'exclude'))


def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume it has already been done by sh."""
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret


def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided."""
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    def bad(f, msg):
        ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        pats = []
    return m, pats


def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]


def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()


def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)


def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]


def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))


def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + b".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(pathutil.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
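# Illustrative sketch (not part of the module): with no [ui] origbackuppath
# configured, the backup sits next to the file; with one configured, it is
# mirrored under that directory (paths below are hypothetical):
#
#   backuppath(ui, repo, b'src/app.py')
#   # default:                <repo root>/src/app.py.orig
#   # origbackuppath = .bak:  <repo root>/.bak/src/app.py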
class _containsnode:
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))
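# Illustrative sketch (not part of the module): wrap a rev container so that
# membership can be tested by node instead of by rev:
#
#   deletenodes = _containsnode(repo, {3, 5, 8})
#   some_node in deletenodes  # True iff repo.changelog.rev(some_node)
#                             # is one of 3, 5, 8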
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(
                    unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                )
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = repo.nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topological order; that might be useful
            # for some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportarchived(repo) and mayusearchived:
            # this assumes we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )


def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
1229 except ValueError:
1229 except ValueError:
1230 raise error.InputError(_(b'similarity must be a number'))
1230 raise error.InputError(_(b'similarity must be a number'))
1231 if similarity < 0 or similarity > 100:
1231 if similarity < 0 or similarity > 100:
1232 raise error.InputError(_(b'similarity must be between 0 and 100'))
1232 raise error.InputError(_(b'similarity must be between 0 and 100'))
1233 similarity /= 100.0
1233 similarity /= 100.0
1234
1234
1235 ret = 0
1235 ret = 0
1236
1236
1237 wctx = repo[None]
1237 wctx = repo[None]
1238 for subpath in sorted(wctx.substate):
1238 for subpath in sorted(wctx.substate):
1239 submatch = matchmod.subdirmatcher(subpath, m)
1239 submatch = matchmod.subdirmatcher(subpath, m)
1240 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1240 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1241 sub = wctx.sub(subpath)
1241 sub = wctx.sub(subpath)
1242 subprefix = repo.wvfs.reljoin(prefix, subpath)
1242 subprefix = repo.wvfs.reljoin(prefix, subpath)
1243 subuipathfn = subdiruipathfn(subpath, uipathfn)
1243 subuipathfn = subdiruipathfn(subpath, uipathfn)
1244 try:
1244 try:
1245 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1245 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1246 ret = 1
1246 ret = 1
1247 except error.LookupError:
1247 except error.LookupError:
1248 repo.ui.status(
1248 repo.ui.status(
1249 _(b"skipping missing subrepository: %s\n")
1249 _(b"skipping missing subrepository: %s\n")
1250 % uipathfn(subpath)
1250 % uipathfn(subpath)
1251 )
1251 )
1252
1252
1253 rejected = []
1253 rejected = []
1254
1254
1255 def badfn(f, msg):
1255 def badfn(f, msg):
1256 if f in m.files():
1256 if f in m.files():
1257 m.bad(f, msg)
1257 m.bad(f, msg)
1258 rejected.append(f)
1258 rejected.append(f)
1259
1259
1260 badmatch = matchmod.badmatch(m, badfn)
1260 badmatch = matchmod.badmatch(m, badfn)
1261 added, unknown, deleted, removed, forgotten = _interestingfiles(
1261 added, unknown, deleted, removed, forgotten = _interestingfiles(
1262 repo, badmatch
1262 repo, badmatch
1263 )
1263 )
1264
1264
1265 unknownset = set(unknown + forgotten)
1265 unknownset = set(unknown + forgotten)
1266 toprint = unknownset.copy()
1266 toprint = unknownset.copy()
1267 toprint.update(deleted)
1267 toprint.update(deleted)
1268 for abs in sorted(toprint):
1268 for abs in sorted(toprint):
1269 if repo.ui.verbose or not m.exact(abs):
1269 if repo.ui.verbose or not m.exact(abs):
1270 if abs in unknownset:
1270 if abs in unknownset:
1271 status = _(b'adding %s\n') % uipathfn(abs)
1271 status = _(b'adding %s\n') % uipathfn(abs)
1272 label = b'ui.addremove.added'
1272 label = b'ui.addremove.added'
1273 else:
1273 else:
1274 status = _(b'removing %s\n') % uipathfn(abs)
1274 status = _(b'removing %s\n') % uipathfn(abs)
1275 label = b'ui.addremove.removed'
1275 label = b'ui.addremove.removed'
1276 repo.ui.status(status, label=label)
1276 repo.ui.status(status, label=label)
1277
1277
1278 renames = _findrenames(
1278 renames = _findrenames(
1279 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1279 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1280 )
1280 )
1281
1281
1282 if not dry_run and (unknown or forgotten or deleted or renames):
1282 if not dry_run and (unknown or forgotten or deleted or renames):
1283 if open_tr is not None:
1283 if open_tr is not None:
1284 open_tr()
1284 open_tr()
1285 _markchanges(repo, unknown + forgotten, deleted, renames)
1285 _markchanges(repo, unknown + forgotten, deleted, renames)
1286
1286
1287 for f in rejected:
1287 for f in rejected:
1288 if f in m.files():
1288 if f in m.files():
1289 return 1
1289 return 1
1290 return ret
1290 return ret
1291
1291
1292
1292
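# Illustrative sketch (commented out): roughly the call shape that
# `hg addremove -s 50` reaches after option parsing. The matcher and
# uipathfn construction is simplified here for exposition; the real
# plumbing lives in cmdutil.
#
#     m = matchmod.always()
#     uipathfn = getuipathfn(repo, legacyrelativevalue=True)
#     ret = addremove(repo, m, prefix=b'', uipathfn=uipathfn,
#                     opts={b'similarity': b'50'})

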
def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. Files are relative
    to the repo root."""
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0


def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    for abs, st in walkresults.items():
        entry = dirstate.get_entry(abs)
        if (not entry.any_tracked) and audit_path.check(abs):
            unknown.append(abs)
        elif (not entry.removed) and not st:
            deleted.append(abs)
        elif entry.removed and st:
            forgotten.append(abs)
        # for finding renames
        elif entry.removed and not st:
            removed.append(abs)
        elif entry.added:
            added.append(abs)

    return added, unknown, deleted, removed, forgotten


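# Quick reference for the classification above, keyed on the dirstate entry
# and on whether the file still exists on disk (``st``); informational only:
#
#     untracked, passes path audit     -> unknown
#     tracked, missing on disk         -> deleted
#     marked removed, present on disk  -> forgotten
#     marked removed, missing on disk  -> removed
#     marked added                     -> added

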
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(
            repo, added, removed, similarity
        ):
            if (
                repo.ui.verbose
                or not matcher.exact(old)
                or not matcher.exact(new)
            ):
                repo.ui.status(
                    _(
                        b'recording removal of %s as rename to %s '
                        b'(%d%% similar)\n'
                    )
                    % (uipathfn(old), uipathfn(new), score * 100)
                )
            renames[new] = old
    return renames


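# Note on the similarity scale: `hg addremove --similarity 80` arrives here
# as 0.8 (see the division by 100.0 in addremove() above), and
# similar.findrenames() yields (old, new, score) triples at or above that
# threshold, hence the `score * 100` in the status message.

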
def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.items():
            wctx.copy(old, new)


def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed


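# Illustrative sketch (commented out) of how a log-style consumer can drive
# getrenamedfn(); the helper name is made up:
#
#     def _collectrenames(repo, revs):
#         getrenamed = getrenamedfn(repo)
#         return {
#             rev: {fn: getrenamed(fn, rev) for fn in repo[rev].files()}
#             for rev in revs
#         }

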
def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn


def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        entry = repo.dirstate.get_entry(dst)
        if (entry.added or not entry.tracked) and not dryrun:
            repo.dirstate.set_tracked(dst)
    else:
        if repo.dirstate.get_entry(origsrc).added and origsrc == src:
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if not repo.dirstate.get_entry(dst).tracked and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)


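# Illustrative call shape (commented out): this is the bookkeeping behind
# `hg copy`, invoked after the caller has already duplicated the file in the
# working copy; the file names are made up:
#
#     dirstatecopy(ui, repo, repo[None], b'a.txt', b'a-copy.txt')

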
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    s = newctx.status(oldctx, match=match)

    for f in s.modified:
        ds.update_file_p1(f, p1_tracked=True)

    for f in s.added:
        ds.update_file_p1(f, p1_tracked=False)

    for f in s.removed:
        ds.update_file_p1(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
    # Adjust the dirstate copies
    for dst, src in copies.items():
        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()


def filterrequirements(requirements):
    """filters the requirements into two sets:

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq)
    """
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        wc, store = set(), set()
        for r in requirements:
            if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
                wc.add(r)
            else:
                store.add(r)
        return wc, store
    return requirements, None


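# Sketch of the split behavior:
#
#     wc, store = filterrequirements(repo.requirements)
#     # with SHARESAFE_REQUIREMENT present: wc holds the entries listed in
#     # WORKING_DIR_REQUIREMENTS, store holds everything else
#     # without it: wc is the full requirement set and store is None

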
def istreemanifest(repo):
    """returns whether the repository is using treemanifest or not"""
    return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements


def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled, and on which requirements are
    wdir requirements and which are store requirements.
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # only remove store requires if we are using store
        repo.svfs.tryunlink(b'requires')


def writerequires(opener, requirements):
    with opener(b'requires', b'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write(b"%s\n" % r)


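# The resulting file is a sorted, newline-terminated list; for example,
# writerequires(vfs, {b'store', b'revlogv1'}) leaves a 'requires' file
# containing:
#
#     revlogv1
#     store

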
class filecachesubentry:
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except FileNotFoundError:
            pass


class filecacheentry:
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()


class filecache:
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def tracked_paths(self, obj):
        return [self.join(obj, path) for path in self.paths]

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        # XXX We should be using a unicode string instead of bytes for the main
        # name (and the _filecache key). The fact we use bytes is a remnant
        # from Python 2; since the name is derived from an attribute name, a
        # `str` is a better fit now that we support Python 3 only.
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = self.tracked_paths(obj)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # a function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = self.tracked_paths(obj)
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x


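# Minimal sketch (commented out) of the plumbing a filecache user needs:
# subclass it with a join() and keep a _filecache dict on the host object.
# The classes below are hypothetical; in-tree users (for instance
# localrepo's repofilecache) provide the real join() implementations.
#
#     class demofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class demo:
#         def __init__(self, vfs):
#             self.vfs = vfs
#             self._filecache = {}
#
#         @demofilecache(b'bookmarks')
#         def bookmarks(self):
#             return self.vfs.tryread(b'bookmarks')

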
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(
            _(b"extdata command '%s' failed: %s")
            % (cmd, procutil.explainexit(proc.returncode))
        )

    return data


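# Example configuration driving this function (the source name and data
# file are made up): an hgrc section such as
#
#     [extdata]
#     bugzilla = shell:cat .hg/bugzilla-data
#
# where each output line is "<revspec> <value>", makes
# extdatasource(repo, b"bugzilla") return a {rev: value} mapping; revisions
# unknown to the local repository are silently skipped.

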
class progress:
    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        unit = b''
        if self.unit:
            unit = b' ' + self.unit
        if item:
            item = b' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, item, self.pos, self.total, unit, pct)
            )
        else:
            self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))


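# Typical usage goes through ui.makeprogress(), which supplies the
# updatebar callback; the context-manager protocol above guarantees the
# bar is completed even on error:
#
#     with ui.makeprogress(b'scanning', unit=b'files', total=nfiles) as prog:
#         for f in files:
#             prog.increment(item=f)

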
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta"""
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta') or ui.configbool(
        b'format', b'usegeneraldelta'
    )


def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimized

    The `format.generaldelta` config is an old form of the config that also
    implies that incoming delta-bases should never be trusted. This function
    exists for this purpose.
    """
    # experimental config: format.generaldelta
    return ui.configbool(b'format', b'generaldelta')


class simplekeyvaluefile:
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _(b"empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
            if self.firstlinekey in updatedict:
                e = _(b"%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as-is, not in key=value form"""
        lines = []
        if firstline is not None:
            lines.append(b'%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = b"key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = b"keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = b"invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if b'\n' in v:
                e = b"invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append(b"%s=%s\n" % (k, v))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(lines))


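# Round-trip sketch (commented out); the vfs would normally be repo.vfs and
# the file name is made up:
#
#     skv = simplekeyvaluefile(repo.vfs, b'demostate')
#     skv.write({b'version': b'1', b'node': b'abc123'},
#               firstline=b'demo-state-v1')
#     d = skv.read(firstlinenonkeyval=True)
#     # d == {b'__firstline': b'demo-state-v1',
#     #       b'version': b'1', b'node': b'abc123'}

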
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

_reportnewcssource = [
    b'pull',
    b'unbundle',
]


def prefetchfiles(repo, revmatches):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them.

    Args:
      revmatches: a list of (revision, match) tuples to indicate the files to
      fetch at each revision. If any of the match elements is None, it matches
      all files.
    """

    def _matcher(m):
        if m:
            assert isinstance(m, matchmod.basematcher)
            # The command itself will complain about files that don't exist, so
            # don't duplicate the message.
            return matchmod.badmatch(m, lambda fn, msg: None)
        else:
            return matchall(repo)

    revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]

    fileprefetchhooks(repo, revbadmatches)


# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()
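
# How an extension would typically hook in (the names are illustrative):
#
#     def _myprefetch(repo, revmatches):
#         for rev, match in revmatches:
#             ...  # fetch the files matched at `rev` from a remote store
#
#     fileprefetchhooks.add(b'myextension', _myprefetch)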
1991
1995
1992 # A marker that tells the evolve extension to suppress its own reporting
1996 # A marker that tells the evolve extension to suppress its own reporting
1993 _reportstroubledchangesets = True
1997 _reportstroubledchangesets = True
1994
1998
1995
1999
1996 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
2000 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1997 """register a callback to issue a summary after the transaction is closed
2001 """register a callback to issue a summary after the transaction is closed
1998
2002
1999 If as_validator is true, then the callbacks are registered as transaction
2003 If as_validator is true, then the callbacks are registered as transaction
2000 validators instead
2004 validators instead
2001 """
2005 """
2002
2006
2003 def txmatch(sources):
2007 def txmatch(sources):
2004 return any(txnname.startswith(source) for source in sources)
2008 return any(txnname.startswith(source) for source in sources)
2005
2009
2006 categories = []
2010 categories = []
2007
2011
2008 def reportsummary(func):
2012 def reportsummary(func):
2009 """decorator for report callbacks."""
2013 """decorator for report callbacks."""
2010 # The repoview life cycle is shorter than the one of the actual
2014 # The repoview life cycle is shorter than the one of the actual
2011 # underlying repository. So the filtered object can die before the
2015 # underlying repository. So the filtered object can die before the
2012 # weakref is used leading to troubles. We keep a reference to the
2016 # weakref is used leading to troubles. We keep a reference to the
2013 # unfiltered object and restore the filtering when retrieving the
2017 # unfiltered object and restore the filtering when retrieving the
2014 # repository through the weakref.
2018 # repository through the weakref.
2015 filtername = repo.filtername
2019 filtername = repo.filtername
2016 reporef = weakref.ref(repo.unfiltered())
2020 reporef = weakref.ref(repo.unfiltered())
2017
2021
2018 def wrapped(tr):
2022 def wrapped(tr):
2019 repo = reporef()
2023 repo = reporef()
2020 if filtername:
2024 if filtername:
2021 assert repo is not None # help pytype
2025 assert repo is not None # help pytype
2022 repo = repo.filtered(filtername)
2026 repo = repo.filtered(filtername)
2023 func(repo, tr)
2027 func(repo, tr)
2024
2028
2025 newcat = b'%02i-txnreport' % len(categories)
2029 newcat = b'%02i-txnreport' % len(categories)
2026 if as_validator:
2030 if as_validator:
2027 otr.addvalidator(newcat, wrapped)
2031 otr.addvalidator(newcat, wrapped)
2028 else:
2032 else:
2029 otr.addpostclose(newcat, wrapped)
2033 otr.addpostclose(newcat, wrapped)
2030 categories.append(newcat)
2034 categories.append(newcat)
2031 return wrapped
2035 return wrapped
2032
2036
2033 @reportsummary
2037 @reportsummary
2034 def reportchangegroup(repo, tr):
2038 def reportchangegroup(repo, tr):
2035 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2039 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2036 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2040 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2037 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2041 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2038 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2042 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2039 if cgchangesets or cgrevisions or cgfiles:
2043 if cgchangesets or cgrevisions or cgfiles:
2040 htext = b""
2044 htext = b""
2041 if cgheads:
2045 if cgheads:
2042 htext = _(b" (%+d heads)") % cgheads
2046 htext = _(b" (%+d heads)") % cgheads
2043 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2047 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2044 if as_validator:
2048 if as_validator:
2045 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2049 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2046 assert repo is not None # help pytype
2050 assert repo is not None # help pytype
2047 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2051 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2048
2052
2049 if txmatch(_reportobsoletedsource):
2053 if txmatch(_reportobsoletedsource):
2050
2054
2051 @reportsummary
2055 @reportsummary
2052 def reportobsoleted(repo, tr):
2056 def reportobsoleted(repo, tr):
2053 obsoleted = obsutil.getobsoleted(repo, tr)
2057 obsoleted = obsutil.getobsoleted(repo, tr)
2054 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2058 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2055 if newmarkers:
2059 if newmarkers:
2056 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2060 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2057 if obsoleted:
2061 if obsoleted:
2058 msg = _(b'obsoleted %i changesets\n')
2062 msg = _(b'obsoleted %i changesets\n')
2059 if as_validator:
2063 if as_validator:
2060 msg = _(b'obsoleting %i changesets\n')
2064 msg = _(b'obsoleting %i changesets\n')
2061 repo.ui.status(msg % len(obsoleted))
2065 repo.ui.status(msg % len(obsoleted))
2062
2066
        if obsolete.isenabled(
            repo, obsolete.createmarkersopt
        ) and repo.ui.configbool(
            b'experimental', b'evolution.report-instabilities'
        ):
            instabilitytypes = [
                (b'orphan', b'orphan'),
                (b'phase-divergent', b'phasedivergent'),
                (b'content-divergent', b'contentdivergent'),
            ]

            def getinstabilitycounts(repo):
                filtered = repo.changelog.filteredrevs
                counts = {}
                for instability, revset in instabilitytypes:
                    counts[instability] = len(
                        set(obsolete.getrevs(repo, revset)) - filtered
                    )
                return counts

            oldinstabilitycounts = getinstabilitycounts(repo)

            @reportsummary
            def reportnewinstabilities(repo, tr):
                newinstabilitycounts = getinstabilitycounts(repo)
                for instability, revset in instabilitytypes:
                    delta = (
                        newinstabilitycounts[instability]
                        - oldinstabilitycounts[instability]
                    )
                    msg = getinstabilitymessage(delta, instability)
                    if msg:
                        repo.ui.warn(msg)

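        # Illustrative sketch (hypothetical counts, not upstream code): only
        # a positive delta produces a warning, mirroring the contract of
        # getinstabilitymessage() below.
        def _instability_delta_sketch():
            old = {b'orphan': 0, b'phase-divergent': 1}
            new = {b'orphan': 2, b'phase-divergent': 1}
            warnings = []
            for kind in old:
                delta = new[kind] - old[kind]
                if delta > 0:
                    warnings.append(b'%i new %s changesets\n' % (delta, kind))
            return warnings  # [b'2 new orphan changesets\n']
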
    if txmatch(_reportnewcssource):

        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = b'%s:%s' % (minrev, maxrev)
                draft = len(repo.revs(b'%ld and draft()', revs))
                secret = len(repo.revs(b'%ld and secret()', revs))
                if not (draft or secret):
                    msg = _(b'new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _(b'new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _(b'new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = b'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search for new changesets directly pulled as obsolete
            duplicates = tr.changes.get(b'revduplicates', ())
            obsadded = unfi.revs(
                b'(%d: + %ld) and obsolete()', origrepolen, duplicates
            )
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = b'(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

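        # Illustrative sketch (not upstream code; plain ints stand in for
        # the changectx objects the real code formats): how the revision
        # range and the draft/secret suffix are assembled.
        def _newcs_message_sketch(minrev=10, maxrev=12, draft=2, secret=0):
            if minrev == maxrev:
                revrange = b'%d' % minrev
            else:
                revrange = b'%d:%d' % (minrev, maxrev)
            if not (draft or secret):
                return b'new changesets %s\n' % revrange
            if draft and secret:
                return b'new changesets %s (%d drafts, %d secrets)\n' % (
                    revrange,
                    draft,
                    secret,
                )
            if draft:
                # default arguments -> b'new changesets 10:12 (2 drafts)\n'
                return b'new changesets %s (%d drafts)\n' % (revrange, draft)
            return b'new changesets %s (%d secrets)\n' % (revrange, secret)
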
        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get(b'origrepolen', len(repo))
            published = []
            for revs, (old, new) in tr.changes.get(b'phases', []):
                if new != phases.public:
                    continue
                published.extend(rev for rev in revs if rev < origrepolen)
            if not published:
                return
            msg = _(b'%d local changesets published\n')
            if as_validator:
                msg = _(b'%d local changesets will be published\n')
            repo.ui.status(msg % len(published))


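# Illustrative sketch (hypothetical transaction data, not upstream code):
# reportphasechanges above counts only pre-existing revisions
# (rev < origrepolen) that were turned public; phases.public is 0 in
# Mercurial, used as a literal stand-in here.
def _phase_report_sketch():
    origrepolen = 5
    public = 0
    phase_changes = [([3, 4, 5], (1, 0)), ([6], (1, 1))]
    published = [
        rev
        for revs, (old, new) in phase_changes
        if new == public
        for rev in revs
        if rev < origrepolen
    ]
    return len(published)  # 2 -> "2 local changesets published"

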
def getinstabilitymessage(delta, instability):
    """Return the message used to warn about new instabilities.

    Exists as a separate function so that extensions can wrap it to show
    more information, like how to fix the instabilities."""
    if delta > 0:
        return _(b'%i new %s changesets\n') % (delta, instability)


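# Usage note (hypothetical calls, assuming the identity translation): the
# None return for non-positive deltas is what reportnewinstabilities
# relies on.
#
#   getinstabilitymessage(2, b'orphan')  -> b'2 new orphan changesets\n'
#   getinstabilitymessage(0, b'orphan')  -> None

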
def nodesummaries(repo, nodes, maxnumnodes=4):
    # Render a space-separated list of short node hashes, truncated past
    # maxnumnodes unless the ui is verbose.
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return b' '.join(short(h) for h in nodes)
    first = b' '.join(short(h) for h in nodes[:maxnumnodes])
    return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)


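# Illustrative sketch (hypothetical helper, not upstream code): the
# truncation rule of nodesummaries with plain strings standing in for
# node hashes.
def _summaries_sketch(items, maxnumnodes=4):
    if len(items) <= maxnumnodes:
        return b' '.join(items)
    first = b' '.join(items[:maxnumnodes])
    # _summaries_sketch([b'a', b'b', b'c', b'd', b'e', b'f'])
    #   -> b'a b c d and 2 others'
    return b"%s and %d others" % (first, len(items) - maxnumnodes)

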
def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
    """check that no named branch has multiple heads"""
    if desc in (b'strip', b'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered(filtername)
    # possible improvement: we could restrict the check to affected branches
    bm = visible.branchmap()
    for name in bm:
        heads = bm.branchheads(name, closed=accountclosed)
        if len(heads) > 1:
            msg = _(b'rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _(b'%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)


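# Note (an assumption about the usual wiring, not stated in this change):
# enforcesinglehead is typically driven by hgrc settings such as
#
#   [experimental]
#   single-head-per-branch = yes
#
# in which case a transaction leaving several heads on one named branch is
# aborted with the message built above.

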
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    return sink


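# Illustrative sketch (an assumed, minimal extension; shown as a comment to
# avoid implying this module imports `extensions` itself).  It uses the
# standard extensions.wrapfunction helper to decorate this hook:
#
#     from mercurial import extensions, scmutil
#
#     def extsetup(ui):
#         def _wrapped(orig, sink):
#             sink = orig(sink)
#             # decorate the sink before the conversion starts
#             return sink
#
#         extensions.wrapfunction(scmutil, 'wrapconvertsink', _wrapped)

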
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not specs:
        return repo

    if not repo.filtername or not repo.ui.configbool(
        b'experimental', b'directaccess'
    ):
        return repo

    if repo.filtername not in (b'visible', b'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:  # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == b'warn':
        unfi = repo.unfiltered()
        revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(
            _(
                b"warning: accessing hidden changesets for write "
                b"operation: %s\n"
            )
            % revstr
        )

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered(b'visible-hidden', revs)


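# Illustrative sketch (hypothetical helper, not upstream code): the three
# early returns in unhidehashlikerevs reduce to this predicate;
# `directaccess` is the [experimental] directaccess hgrc knob read above.
def _directaccess_applies_sketch(filtername, directaccess):
    return bool(
        directaccess and filtername in (b'visible', b'visible-hidden')
    )

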
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs


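# Illustrative sketch (standalone stand-ins, hypothetical data; not
# upstream code): the two-step resolution in _getrevsfromsymbols --
# integer symbols first, then hex-prefix lookup (stubbed out here).
def _symbol_resolution_sketch():
    hidden_revs = {4, 7}  # revisions filtered out of the visible repo
    tiprev = 8  # length of the unfiltered changelog
    allowrevnums = True
    revs = set()
    for s in [b'4', b'99', b'abc123']:
        try:
            n = int(s)
            if n <= tiprev:
                if allowrevnums and n in hidden_revs:
                    revs.add(n)
                continue
        except ValueError:
            pass
        # b'99' and b'abc123' would fall through to hash-prefix resolution
    return revs  # {4}

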
def bookmarkrevs(repo, mark):
    """Select revisions reachable by a given bookmark

    If the bookmarked revision isn't a head, an empty set will be returned.
    """
    return repo.revs(format_bookmark_revspec(mark))


def format_bookmark_revspec(mark):
    """Build a revset expression to select revisions reachable by a given
    bookmark"""
    mark = b'literal:' + mark
    return revsetlang.formatspec(
        b"ancestors(bookmark(%s)) - "
        b"ancestors(head() and not bookmark(%s)) - "
        b"ancestors(bookmark() and not bookmark(%s))",
        mark,
        mark,
        mark,
    )


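# Illustration (hypothetical bookmark name; quoting approximated, the real
# revsetlang.formatspec handles escaping): for mark = b'feature' the
# expression produced is roughly
#
#   ancestors(bookmark('literal:feature'))
#   - ancestors(head() and not bookmark('literal:feature'))
#   - ancestors(bookmark() and not bookmark('literal:feature'))

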
def ismember(ui, username, userlist):
    """Check if username is a member of userlist.

    If userlist has a single '*' member, all users are considered members.
    Can be overridden by extensions to provide more complex authorization
    schemes.
    """
    return userlist == [b'*'] or username in userlist
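

# Usage note (hypothetical ACL lists):
#
#   ismember(ui, b'alice', [b'*'])              -> True   (wildcard)
#   ismember(ui, b'alice', [b'alice', b'bob'])  -> True
#   ismember(ui, b'mallory', [b'alice'])        -> False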