commit: don't use hard-coded `.hg/last-message.txt` path in error message...
Martin von Zweigbergk
r50042:db3f8e5c stable
@@ -1,3930 +1,3931
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import functools
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

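# Illustrative sketch, not part of localrepo.py: how the filecache classes
# above are typically consumed. A property is declared with one of the
# decorators so it is invalidated when the backing file changes, and
# isfilecached() reports whether it has been materialized without forcing a
# load. The class and property names below are hypothetical.
class _examplefilecacheuser(object):
    @repofilecache(b'bookmarks')
    def _bookmarks(self):
        # a real implementation would parse .hg/bookmarks through self.vfs
        return {}
#
# isfilecached(repo, 'bookmarks') would then return (None, False) until the
# property has been computed once, and (<cached value>, True) afterwards.
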
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

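# Illustrative sketch, not part of localrepo.py: the command-executor pattern
# implemented above. Callers obtain an executor from a peer, queue commands,
# and read results from futures. `peer` stands for any object whose
# commandexecutor() returns a localcommandexecutor; the function name is
# hypothetical.
def _example_heads(peer):
    with peer.commandexecutor() as executor:
        future = executor.callcommand(b'heads', {})
    # for the local executor the future is already resolved when it is returned
    return future.result()
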
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle that requirements.
featuresetupfuncs = set()


def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the require file present at root of this vfs
    and return a set of requirements

    If allowmissing is True, we suppress ENOENT if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(vfs.read(b'requires').splitlines())
    except IOError as e:
        if not (allowmissing and e.errno == errno.ENOENT):
            raise
        requirements = set()
    return requirements

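# Illustrative note, not part of localrepo.py: `.hg/requires` is a plain
# newline-delimited list of feature names, for example:
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     store
#
# _readrequires() above simply splits that file into a set, so a vfs whose
# requires file holds b"store\nrevlogv1\n" would yield {b'store', b'revlogv1'}.
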
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
    # is not present, refer checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:

        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates current repository
    # is a share and store exists in path mentioned in `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produces types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

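# Illustrative sketch, not part of localrepo.py: the class-composition trick
# used by makelocalrepository() above. Each factory contributes a base class
# and type() combines them into one derived repository type. The mixin and
# type names below are hypothetical.
def _example_compose_repo_type():
    class _storagemixin(object):
        pass

    class _workingcopymixin(object):
        pass

    bases = [_storagemixin, _workingcopymixin]
    # the generated name can embed characters that aren't valid identifiers
    name = 'derivedrepo:/path/to/repo<revlogv1,store>'
    return type(name, tuple(bases), {})
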
def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we has to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret

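# Illustrative sketch, not part of localrepo.py: one way an extension might
# monkeypatch loadhgrc() as suggested in its docstring, using
# extensions.wrapfunction(). The wrapper name and the `hgrc-extra` file are
# hypothetical.
def _example_wrapped_loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
    ret = orig(ui, wdirvfs, hgvfs, requirements, sharedvfs)
    try:
        # pull in an additional, extension-specific config file
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass
    return ret
#
# an extension would typically register the wrapper from its setup code with:
#     extensions.wrapfunction(localrepo, 'loadhgrc', _example_wrapped_loadhgrc)
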
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)

    return supported

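# Illustrative sketch, not part of localrepo.py: how an extension can register
# a callback in featuresetupfuncs (consumed by gathersupportedrequirements()
# above) so that a custom requirement is recognized when its repositories are
# opened. The requirement name below is hypothetical.
def _example_featuresetup(ui, supported):
    # advertise that this extension knows how to open repos with this feature
    supported.add(b'exp-example-requirement')
#
# in the extension module this would be registered with:
#     localrepo.featuresetupfuncs.add(_example_featuresetup)
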
940 def ensurerequirementsrecognized(requirements, supported):
940 def ensurerequirementsrecognized(requirements, supported):
941 """Validate that a set of local requirements is recognized.
941 """Validate that a set of local requirements is recognized.
942
942
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
943 Receives a set of requirements. Raises an ``error.RepoError`` if there
944 exists any requirement in that set that currently loaded code doesn't
944 exists any requirement in that set that currently loaded code doesn't
945 recognize.
945 recognize.
946
946
947 Returns a set of supported requirements.
947 Returns a set of supported requirements.
948 """
948 """
949 missing = set()
949 missing = set()
950
950
951 for requirement in requirements:
951 for requirement in requirements:
952 if requirement in supported:
952 if requirement in supported:
953 continue
953 continue
954
954
955 if not requirement or not requirement[0:1].isalnum():
955 if not requirement or not requirement[0:1].isalnum():
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
956 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
957
957
958 missing.add(requirement)
958 missing.add(requirement)
959
959
960 if missing:
960 if missing:
961 raise error.RequirementError(
961 raise error.RequirementError(
962 _(b'repository requires features unknown to this Mercurial: %s')
962 _(b'repository requires features unknown to this Mercurial: %s')
963 % b' '.join(sorted(missing)),
963 % b' '.join(sorted(missing)),
964 hint=_(
964 hint=_(
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
965 b'see https://mercurial-scm.org/wiki/MissingRequirement '
966 b'for more information'
966 b'for more information'
967 ),
967 ),
968 )
968 )
969
969
970
970
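# [Editor's illustration] A standalone, simplified version of the check above,
# using plain strings and ValueError instead of the repository's byte strings
# and error classes; it only demonstrates the control flow.
def _check_recognized_sketch(requirements, supported):
    missing = set()
    for requirement in requirements:
        if requirement in supported:
            continue
        if not requirement or not requirement[0:1].isalnum():
            raise ValueError('.hg/requires file is corrupt')
        missing.add(requirement)
    if missing:
        raise ValueError('unknown requirements: %s' % ' '.join(sorted(missing)))

# _check_recognized_sketch({'revlogv1', 'exp-unknown'}, {'revlogv1'})
# -> ValueError: unknown requirements: exp-unknown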
971 def ensurerequirementscompatible(ui, requirements):
971 def ensurerequirementscompatible(ui, requirements):
972 """Validates that a set of recognized requirements is mutually compatible.
972 """Validates that a set of recognized requirements is mutually compatible.
973
973
974 Some requirements may not be compatible with others or require
974 Some requirements may not be compatible with others or require
975 config options that aren't enabled. This function is called during
975 config options that aren't enabled. This function is called during
976 repository opening to ensure that the set of requirements needed
976 repository opening to ensure that the set of requirements needed
977 to open a repository is sane and compatible with config options.
977 to open a repository is sane and compatible with config options.
978
978
979 Extensions can monkeypatch this function to perform additional
979 Extensions can monkeypatch this function to perform additional
980 checking.
980 checking.
981
981
982 ``error.RepoError`` should be raised on failure.
982 ``error.RepoError`` should be raised on failure.
983 """
983 """
984 if (
984 if (
985 requirementsmod.SPARSE_REQUIREMENT in requirements
985 requirementsmod.SPARSE_REQUIREMENT in requirements
986 and not sparse.enabled
986 and not sparse.enabled
987 ):
987 ):
988 raise error.RepoError(
988 raise error.RepoError(
989 _(
989 _(
990 b'repository is using sparse feature but '
990 b'repository is using sparse feature but '
991 b'sparse is not enabled; enable the '
991 b'sparse is not enabled; enable the '
992 b'"sparse" extensions to access'
992 b'"sparse" extensions to access'
993 )
993 )
994 )
994 )
995
995
996
996
997 def makestore(requirements, path, vfstype):
997 def makestore(requirements, path, vfstype):
998 """Construct a storage object for a repository."""
998 """Construct a storage object for a repository."""
999 if requirementsmod.STORE_REQUIREMENT in requirements:
999 if requirementsmod.STORE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1000 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1001 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1002 return storemod.fncachestore(path, vfstype, dotencode)
1002 return storemod.fncachestore(path, vfstype, dotencode)
1003
1003
1004 return storemod.encodedstore(path, vfstype)
1004 return storemod.encodedstore(path, vfstype)
1005
1005
1006 return storemod.basicstore(path, vfstype)
1006 return storemod.basicstore(path, vfstype)
1007
1007
1008
1008
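# [Editor's illustration] The store selection above, restated as a small
# standalone sketch. Plain strings stand in for the requirement constants from
# ``requirements.py`` and the class names from ``store.py``.
def _store_kind_sketch(requirements):
    if 'store' not in requirements:
        return 'basicstore'       # no store requirement: oldest layout
    if 'fncache' not in requirements:
        return 'encodedstore'     # encoded filenames, but no fncache
    dotencode = 'dotencode' in requirements
    return 'fncachestore (dotencode=%s)' % dotencode

# _store_kind_sketch({'store', 'fncache', 'dotencode'})
# -> 'fncachestore (dotencode=True)'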
1009 def resolvestorevfsoptions(ui, requirements, features):
1009 def resolvestorevfsoptions(ui, requirements, features):
1010 """Resolve the options to pass to the store vfs opener.
1010 """Resolve the options to pass to the store vfs opener.
1011
1011
1012 The returned dict is used to influence behavior of the storage layer.
1012 The returned dict is used to influence behavior of the storage layer.
1013 """
1013 """
1014 options = {}
1014 options = {}
1015
1015
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1016 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1017 options[b'treemanifest'] = True
1017 options[b'treemanifest'] = True
1018
1018
1019 # experimental config: format.manifestcachesize
1019 # experimental config: format.manifestcachesize
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1020 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1021 if manifestcachesize is not None:
1021 if manifestcachesize is not None:
1022 options[b'manifestcachesize'] = manifestcachesize
1022 options[b'manifestcachesize'] = manifestcachesize
1023
1023
1024 # In the absence of another requirement superseding a revlog-related
1024 # In the absence of another requirement superseding a revlog-related
1025 # requirement, we have to assume the repo is using revlog version 0.
1025 # requirement, we have to assume the repo is using revlog version 0.
1026 # This revlog format is super old and we don't bother trying to parse
1026 # This revlog format is super old and we don't bother trying to parse
1027 # opener options for it because those options wouldn't do anything
1027 # opener options for it because those options wouldn't do anything
1028 # meaningful on such old repos.
1028 # meaningful on such old repos.
1029 if (
1029 if (
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1030 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1031 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1032 ):
1032 ):
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1033 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1034 else: # explicitly mark repo as using revlogv0
1034 else: # explicitly mark repo as using revlogv0
1035 options[b'revlogv0'] = True
1035 options[b'revlogv0'] = True
1036
1036
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1037 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1038 options[b'copies-storage'] = b'changeset-sidedata'
1038 options[b'copies-storage'] = b'changeset-sidedata'
1039 else:
1039 else:
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1040 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1041 copiesextramode = (b'changeset-only', b'compatibility')
1042 if writecopiesto in copiesextramode:
1042 if writecopiesto in copiesextramode:
1043 options[b'copies-storage'] = b'extra'
1043 options[b'copies-storage'] = b'extra'
1044
1044
1045 return options
1045 return options
1046
1046
1047
1047
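# [Editor's illustration] The copies-storage decision made above, as a
# standalone sketch; the requirement constant and config value are passed in
# as plain Python values rather than read from a real repository.
def _copies_storage_sketch(has_sidedata_requirement, write_copies_to):
    if has_sidedata_requirement:
        return 'changeset-sidedata'
    if write_copies_to in ('changeset-only', 'compatibility'):
        return 'extra'
    return None  # default: copy metadata stays in the filelog entries

# _copies_storage_sketch(False, 'compatibility') -> 'extra'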
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1048 def resolverevlogstorevfsoptions(ui, requirements, features):
1049 """Resolve opener options specific to revlogs."""
1049 """Resolve opener options specific to revlogs."""
1050
1050
1051 options = {}
1051 options = {}
1052 options[b'flagprocessors'] = {}
1052 options[b'flagprocessors'] = {}
1053
1053
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1054 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1055 options[b'revlogv1'] = True
1055 options[b'revlogv1'] = True
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1056 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1057 options[b'revlogv2'] = True
1057 options[b'revlogv2'] = True
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1058 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1059 options[b'changelogv2'] = True
1059 options[b'changelogv2'] = True
1060
1060
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1061 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1062 options[b'generaldelta'] = True
1062 options[b'generaldelta'] = True
1063
1063
1064 # experimental config: format.chunkcachesize
1064 # experimental config: format.chunkcachesize
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1065 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1066 if chunkcachesize is not None:
1066 if chunkcachesize is not None:
1067 options[b'chunkcachesize'] = chunkcachesize
1067 options[b'chunkcachesize'] = chunkcachesize
1068
1068
1069 deltabothparents = ui.configbool(
1069 deltabothparents = ui.configbool(
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1070 b'storage', b'revlog.optimize-delta-parent-choice'
1071 )
1071 )
1072 options[b'deltabothparents'] = deltabothparents
1072 options[b'deltabothparents'] = deltabothparents
1073
1073
1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1074 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1075 options[b'issue6528.fix-incoming'] = issue6528
1075 options[b'issue6528.fix-incoming'] = issue6528
1076
1076
1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1077 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1078 lazydeltabase = False
1078 lazydeltabase = False
1079 if lazydelta:
1079 if lazydelta:
1080 lazydeltabase = ui.configbool(
1080 lazydeltabase = ui.configbool(
1081 b'storage', b'revlog.reuse-external-delta-parent'
1081 b'storage', b'revlog.reuse-external-delta-parent'
1082 )
1082 )
1083 if lazydeltabase is None:
1083 if lazydeltabase is None:
1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1084 lazydeltabase = not scmutil.gddeltaconfig(ui)
1085 options[b'lazydelta'] = lazydelta
1085 options[b'lazydelta'] = lazydelta
1086 options[b'lazydeltabase'] = lazydeltabase
1086 options[b'lazydeltabase'] = lazydeltabase
1087
1087
1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1088 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1089 if 0 <= chainspan:
1089 if 0 <= chainspan:
1090 options[b'maxdeltachainspan'] = chainspan
1090 options[b'maxdeltachainspan'] = chainspan
1091
1091
1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1092 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1093 if mmapindexthreshold is not None:
1093 if mmapindexthreshold is not None:
1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1094 options[b'mmapindexthreshold'] = mmapindexthreshold
1095
1095
1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1096 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1097 srdensitythres = float(
1097 srdensitythres = float(
1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1098 ui.config(b'experimental', b'sparse-read.density-threshold')
1099 )
1099 )
1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1100 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1101 options[b'with-sparse-read'] = withsparseread
1101 options[b'with-sparse-read'] = withsparseread
1102 options[b'sparse-read-density-threshold'] = srdensitythres
1102 options[b'sparse-read-density-threshold'] = srdensitythres
1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1103 options[b'sparse-read-min-gap-size'] = srmingapsize
1104
1104
1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1105 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1106 options[b'sparse-revlog'] = sparserevlog
1106 options[b'sparse-revlog'] = sparserevlog
1107 if sparserevlog:
1107 if sparserevlog:
1108 options[b'generaldelta'] = True
1108 options[b'generaldelta'] = True
1109
1109
1110 maxchainlen = None
1110 maxchainlen = None
1111 if sparserevlog:
1111 if sparserevlog:
1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1112 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1113 # experimental config: format.maxchainlen
1113 # experimental config: format.maxchainlen
1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1114 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1115 if maxchainlen is not None:
1115 if maxchainlen is not None:
1116 options[b'maxchainlen'] = maxchainlen
1116 options[b'maxchainlen'] = maxchainlen
1117
1117
1118 for r in requirements:
1118 for r in requirements:
1119 # we allow multiple compression engine requirements to co-exist because,
1119 # we allow multiple compression engine requirements to co-exist because,
1120 # strictly speaking, revlog seems to support mixed compression styles.
1120 # strictly speaking, revlog seems to support mixed compression styles.
1121 #
1121 #
1122 # The compression used for new entries will be "the last one"
1122 # The compression used for new entries will be "the last one"
1123 prefix = r.startswith
1123 prefix = r.startswith
1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1124 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1125 options[b'compengine'] = r.split(b'-', 2)[2]
1125 options[b'compengine'] = r.split(b'-', 2)[2]
1126
1126
1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1127 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1128 if options[b'zlib.level'] is not None:
1128 if options[b'zlib.level'] is not None:
1129 if not (0 <= options[b'zlib.level'] <= 9):
1129 if not (0 <= options[b'zlib.level'] <= 9):
1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1130 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1131 raise error.Abort(msg % options[b'zlib.level'])
1131 raise error.Abort(msg % options[b'zlib.level'])
1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1132 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1133 if options[b'zstd.level'] is not None:
1133 if options[b'zstd.level'] is not None:
1134 if not (0 <= options[b'zstd.level'] <= 22):
1134 if not (0 <= options[b'zstd.level'] <= 22):
1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1135 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1136 raise error.Abort(msg % options[b'zstd.level'])
1136 raise error.Abort(msg % options[b'zstd.level'])
1137
1137
1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1138 if requirementsmod.NARROW_REQUIREMENT in requirements:
1139 options[b'enableellipsis'] = True
1139 options[b'enableellipsis'] = True
1140
1140
1141 if ui.configbool(b'experimental', b'rust.index'):
1141 if ui.configbool(b'experimental', b'rust.index'):
1142 options[b'rust.index'] = True
1142 options[b'rust.index'] = True
1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1143 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1144 slow_path = ui.config(
1144 slow_path = ui.config(
1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1145 b'storage', b'revlog.persistent-nodemap.slow-path'
1146 )
1146 )
1147 if slow_path not in (b'allow', b'warn', b'abort'):
1147 if slow_path not in (b'allow', b'warn', b'abort'):
1148 default = ui.config_default(
1148 default = ui.config_default(
1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1149 b'storage', b'revlog.persistent-nodemap.slow-path'
1150 )
1150 )
1151 msg = _(
1151 msg = _(
1152 b'unknown value for config '
1152 b'unknown value for config '
1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1153 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1154 )
1154 )
1155 ui.warn(msg % slow_path)
1155 ui.warn(msg % slow_path)
1156 if not ui.quiet:
1156 if not ui.quiet:
1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1157 ui.warn(_(b'falling back to default value: %s\n') % default)
1158 slow_path = default
1158 slow_path = default
1159
1159
1160 msg = _(
1160 msg = _(
1161 b"accessing `persistent-nodemap` repository without associated "
1161 b"accessing `persistent-nodemap` repository without associated "
1162 b"fast implementation."
1162 b"fast implementation."
1163 )
1163 )
1164 hint = _(
1164 hint = _(
1165 b"check `hg help config.format.use-persistent-nodemap` "
1165 b"check `hg help config.format.use-persistent-nodemap` "
1166 b"for details"
1166 b"for details"
1167 )
1167 )
1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1168 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1169 if slow_path == b'warn':
1169 if slow_path == b'warn':
1170 msg = b"warning: " + msg + b'\n'
1170 msg = b"warning: " + msg + b'\n'
1171 ui.warn(msg)
1171 ui.warn(msg)
1172 if not ui.quiet:
1172 if not ui.quiet:
1173 hint = b'(' + hint + b')\n'
1173 hint = b'(' + hint + b')\n'
1174 ui.warn(hint)
1174 ui.warn(hint)
1175 if slow_path == b'abort':
1175 if slow_path == b'abort':
1176 raise error.Abort(msg, hint=hint)
1176 raise error.Abort(msg, hint=hint)
1177 options[b'persistent-nodemap'] = True
1177 options[b'persistent-nodemap'] = True
1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1178 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1179 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1180 if slow_path not in (b'allow', b'warn', b'abort'):
1180 if slow_path not in (b'allow', b'warn', b'abort'):
1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1181 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1182 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1183 ui.warn(msg % slow_path)
1183 ui.warn(msg % slow_path)
1184 if not ui.quiet:
1184 if not ui.quiet:
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1185 ui.warn(_(b'falling back to default value: %s\n') % default)
1186 slow_path = default
1186 slow_path = default
1187
1187
1188 msg = _(
1188 msg = _(
1189 b"accessing `dirstate-v2` repository without associated "
1189 b"accessing `dirstate-v2` repository without associated "
1190 b"fast implementation."
1190 b"fast implementation."
1191 )
1191 )
1192 hint = _(
1192 hint = _(
1193 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1193 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1194 )
1194 )
1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1195 if not dirstate.HAS_FAST_DIRSTATE_V2:
1196 if slow_path == b'warn':
1196 if slow_path == b'warn':
1197 msg = b"warning: " + msg + b'\n'
1197 msg = b"warning: " + msg + b'\n'
1198 ui.warn(msg)
1198 ui.warn(msg)
1199 if not ui.quiet:
1199 if not ui.quiet:
1200 hint = b'(' + hint + b')\n'
1200 hint = b'(' + hint + b')\n'
1201 ui.warn(hint)
1201 ui.warn(hint)
1202 if slow_path == b'abort':
1202 if slow_path == b'abort':
1203 raise error.Abort(msg, hint=hint)
1203 raise error.Abort(msg, hint=hint)
1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1204 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1205 options[b'persistent-nodemap.mmap'] = True
1205 options[b'persistent-nodemap.mmap'] = True
1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1206 if ui.configbool(b'devel', b'persistent-nodemap'):
1207 options[b'devel-force-nodemap'] = True
1207 options[b'devel-force-nodemap'] = True
1208
1208
1209 return options
1209 return options
1210
1210
1211
1211
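# [Editor's illustration] How the compression requirement loop above maps a
# requirement name to an engine name: splitting on b'-' at most twice keeps
# any dashes inside the engine name intact.
assert b'revlog-compression-zstd'.split(b'-', 2)[2] == b'zstd'
assert b'exp-compression-some-engine'.split(b'-', 2)[2] == b'some-engine'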
1212 def makemain(**kwargs):
1212 def makemain(**kwargs):
1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1213 """Produce a type conforming to ``ilocalrepositorymain``."""
1214 return localrepository
1214 return localrepository
1215
1215
1216
1216
1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1217 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1218 class revlogfilestorage(object):
1218 class revlogfilestorage(object):
1219 """File storage when using revlogs."""
1219 """File storage when using revlogs."""
1220
1220
1221 def file(self, path):
1221 def file(self, path):
1222 if path.startswith(b'/'):
1222 if path.startswith(b'/'):
1223 path = path[1:]
1223 path = path[1:]
1224
1224
1225 return filelog.filelog(self.svfs, path)
1225 return filelog.filelog(self.svfs, path)
1226
1226
1227
1227
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1228 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1229 class revlognarrowfilestorage(object):
1229 class revlognarrowfilestorage(object):
1230 """File storage when using revlogs and narrow files."""
1230 """File storage when using revlogs and narrow files."""
1231
1231
1232 def file(self, path):
1232 def file(self, path):
1233 if path.startswith(b'/'):
1233 if path.startswith(b'/'):
1234 path = path[1:]
1234 path = path[1:]
1235
1235
1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1236 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1237
1237
1238
1238
1239 def makefilestorage(requirements, features, **kwargs):
1239 def makefilestorage(requirements, features, **kwargs):
1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1240 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1241 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1242 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1243
1243
1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1244 if requirementsmod.NARROW_REQUIREMENT in requirements:
1245 return revlognarrowfilestorage
1245 return revlognarrowfilestorage
1246 else:
1246 else:
1247 return revlogfilestorage
1247 return revlogfilestorage
1248
1248
1249
1249
1250 # List of repository interfaces and factory functions for them. Each
1250 # List of repository interfaces and factory functions for them. Each
1251 # will be called in order during ``makelocalrepository()`` to iteratively
1251 # will be called in order during ``makelocalrepository()`` to iteratively
1252 # derive the final type for a local repository instance. We capture the
1252 # derive the final type for a local repository instance. We capture the
1253 # function as a lambda so we don't hold a reference and the module-level
1253 # function as a lambda so we don't hold a reference and the module-level
1254 # functions can be wrapped.
1254 # functions can be wrapped.
1255 REPO_INTERFACES = [
1255 REPO_INTERFACES = [
1256 (repository.ilocalrepositorymain, lambda: makemain),
1256 (repository.ilocalrepositorymain, lambda: makemain),
1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1257 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1258 ]
1258 ]
1259
1259
1260
1260
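# [Editor's note] A hedged sketch of how these factories are combined:
# ``makelocalrepository()``, defined earlier in this file, calls each factory
# to obtain one class per interface and derives the final repository type from
# the collected bases, conceptually:
#
#     bases = []
#     for iface, factoryfn in REPO_INTERFACES:
#         bases.append(factoryfn()(requirements=requirements, features=features))
#     repotype = type('derivedrepo', tuple(bases), {})
#
# The keyword arguments shown match the signatures of ``makemain`` and
# ``makefilestorage`` below; the exact type-construction call is an assumption
# for illustration -- see ``makelocalrepository()`` for the real wiring.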
1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1261 @interfaceutil.implementer(repository.ilocalrepositorymain)
1262 class localrepository(object):
1262 class localrepository(object):
1263 """Main class for representing local repositories.
1263 """Main class for representing local repositories.
1264
1264
1265 All local repositories are instances of this class.
1265 All local repositories are instances of this class.
1266
1266
1267 Constructed on its own, instances of this class are not usable as
1267 Constructed on its own, instances of this class are not usable as
1268 repository objects. To obtain a usable repository object, call
1268 repository objects. To obtain a usable repository object, call
1269 ``hg.repository()``, ``localrepo.instance()``, or
1269 ``hg.repository()``, ``localrepo.instance()``, or
1270 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1270 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1271 ``instance()`` adds support for creating new repositories.
1271 ``instance()`` adds support for creating new repositories.
1272 ``hg.repository()`` adds more extension integration, including calling
1272 ``hg.repository()`` adds more extension integration, including calling
1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1273 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1274 used.
1274 used.
1275 """
1275 """
1276
1276
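# [Editor's note] Typical usage per the docstring above (the path is
# illustrative):
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
#
# Instantiating ``localrepository`` directly skips extension setup and is
# reserved for the factory functions in this module.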
1277 _basesupported = {
1277 _basesupported = {
1278 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1278 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1279 requirementsmod.CHANGELOGV2_REQUIREMENT,
1279 requirementsmod.CHANGELOGV2_REQUIREMENT,
1280 requirementsmod.COPIESSDC_REQUIREMENT,
1280 requirementsmod.COPIESSDC_REQUIREMENT,
1281 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1281 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1282 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1282 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1283 requirementsmod.DOTENCODE_REQUIREMENT,
1283 requirementsmod.DOTENCODE_REQUIREMENT,
1284 requirementsmod.FNCACHE_REQUIREMENT,
1284 requirementsmod.FNCACHE_REQUIREMENT,
1285 requirementsmod.GENERALDELTA_REQUIREMENT,
1285 requirementsmod.GENERALDELTA_REQUIREMENT,
1286 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1286 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1287 requirementsmod.NODEMAP_REQUIREMENT,
1287 requirementsmod.NODEMAP_REQUIREMENT,
1288 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1288 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1289 requirementsmod.REVLOGV1_REQUIREMENT,
1289 requirementsmod.REVLOGV1_REQUIREMENT,
1290 requirementsmod.REVLOGV2_REQUIREMENT,
1290 requirementsmod.REVLOGV2_REQUIREMENT,
1291 requirementsmod.SHARED_REQUIREMENT,
1291 requirementsmod.SHARED_REQUIREMENT,
1292 requirementsmod.SHARESAFE_REQUIREMENT,
1292 requirementsmod.SHARESAFE_REQUIREMENT,
1293 requirementsmod.SPARSE_REQUIREMENT,
1293 requirementsmod.SPARSE_REQUIREMENT,
1294 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1294 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1295 requirementsmod.STORE_REQUIREMENT,
1295 requirementsmod.STORE_REQUIREMENT,
1296 requirementsmod.TREEMANIFEST_REQUIREMENT,
1296 requirementsmod.TREEMANIFEST_REQUIREMENT,
1297 }
1297 }
1298
1298
1299 # list of prefixes for files which can be written without 'wlock'
1299 # list of prefixes for files which can be written without 'wlock'
1300 # Extensions should extend this list when needed
1300 # Extensions should extend this list when needed
1301 _wlockfreeprefix = {
1301 _wlockfreeprefix = {
1302 # We might consider requiring 'wlock' for the next
1302 # We might consider requiring 'wlock' for the next
1303 # two, but pretty much all the existing code assumes
1303 # two, but pretty much all the existing code assumes
1304 # wlock is not needed, so we keep them excluded for
1304 # wlock is not needed, so we keep them excluded for
1305 # now.
1305 # now.
1306 b'hgrc',
1306 b'hgrc',
1307 b'requires',
1307 b'requires',
1308 # XXX cache is a complicated business; someone
1308 # XXX cache is a complicated business; someone
1309 # should investigate this in depth at some point
1309 # should investigate this in depth at some point
1310 b'cache/',
1310 b'cache/',
1311 # XXX shouldn't the dirstate be covered by the wlock?
1311 # XXX shouldn't the dirstate be covered by the wlock?
1312 b'dirstate',
1312 b'dirstate',
1313 # XXX bisect was still a bit too messy at the time
1313 # XXX bisect was still a bit too messy at the time
1314 # this changeset was introduced. Someone should fix
1314 # this changeset was introduced. Someone should fix
1315 # the remaining bit and drop this line
1315 # the remaining bit and drop this line
1316 b'bisect.state',
1316 b'bisect.state',
1317 }
1317 }
1318
1318
1319 def __init__(
1319 def __init__(
1320 self,
1320 self,
1321 baseui,
1321 baseui,
1322 ui,
1322 ui,
1323 origroot,
1323 origroot,
1324 wdirvfs,
1324 wdirvfs,
1325 hgvfs,
1325 hgvfs,
1326 requirements,
1326 requirements,
1327 supportedrequirements,
1327 supportedrequirements,
1328 sharedpath,
1328 sharedpath,
1329 store,
1329 store,
1330 cachevfs,
1330 cachevfs,
1331 wcachevfs,
1331 wcachevfs,
1332 features,
1332 features,
1333 intents=None,
1333 intents=None,
1334 ):
1334 ):
1335 """Create a new local repository instance.
1335 """Create a new local repository instance.
1336
1336
1337 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1337 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1338 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1338 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1339 object.
1339 object.
1340
1340
1341 Arguments:
1341 Arguments:
1342
1342
1343 baseui
1343 baseui
1344 ``ui.ui`` instance that ``ui`` argument was based off of.
1344 ``ui.ui`` instance that ``ui`` argument was based off of.
1345
1345
1346 ui
1346 ui
1347 ``ui.ui`` instance for use by the repository.
1347 ``ui.ui`` instance for use by the repository.
1348
1348
1349 origroot
1349 origroot
1350 ``bytes`` path to working directory root of this repository.
1350 ``bytes`` path to working directory root of this repository.
1351
1351
1352 wdirvfs
1352 wdirvfs
1353 ``vfs.vfs`` rooted at the working directory.
1353 ``vfs.vfs`` rooted at the working directory.
1354
1354
1355 hgvfs
1355 hgvfs
1356 ``vfs.vfs`` rooted at .hg/
1356 ``vfs.vfs`` rooted at .hg/
1357
1357
1358 requirements
1358 requirements
1359 ``set`` of bytestrings representing repository opening requirements.
1359 ``set`` of bytestrings representing repository opening requirements.
1360
1360
1361 supportedrequirements
1361 supportedrequirements
1362 ``set`` of bytestrings representing repository requirements that we
1362 ``set`` of bytestrings representing repository requirements that we
1363 know how to open. May be a superset of ``requirements``.
1363 know how to open. May be a superset of ``requirements``.
1364
1364
1365 sharedpath
1365 sharedpath
1366 ``bytes`` defining the path to the storage base directory. Points to a
1366 ``bytes`` defining the path to the storage base directory. Points to a
1367 ``.hg/`` directory somewhere.
1367 ``.hg/`` directory somewhere.
1368
1368
1369 store
1369 store
1370 ``store.basicstore`` (or derived) instance providing access to
1370 ``store.basicstore`` (or derived) instance providing access to
1371 versioned storage.
1371 versioned storage.
1372
1372
1373 cachevfs
1373 cachevfs
1374 ``vfs.vfs`` used for cache files.
1374 ``vfs.vfs`` used for cache files.
1375
1375
1376 wcachevfs
1376 wcachevfs
1377 ``vfs.vfs`` used for cache files related to the working copy.
1377 ``vfs.vfs`` used for cache files related to the working copy.
1378
1378
1379 features
1379 features
1380 ``set`` of bytestrings defining features/capabilities of this
1380 ``set`` of bytestrings defining features/capabilities of this
1381 instance.
1381 instance.
1382
1382
1383 intents
1383 intents
1384 ``set`` of system strings indicating what this repo will be used
1384 ``set`` of system strings indicating what this repo will be used
1385 for.
1385 for.
1386 """
1386 """
1387 self.baseui = baseui
1387 self.baseui = baseui
1388 self.ui = ui
1388 self.ui = ui
1389 self.origroot = origroot
1389 self.origroot = origroot
1390 # vfs rooted at working directory.
1390 # vfs rooted at working directory.
1391 self.wvfs = wdirvfs
1391 self.wvfs = wdirvfs
1392 self.root = wdirvfs.base
1392 self.root = wdirvfs.base
1393 # vfs rooted at .hg/. Used to access most non-store paths.
1393 # vfs rooted at .hg/. Used to access most non-store paths.
1394 self.vfs = hgvfs
1394 self.vfs = hgvfs
1395 self.path = hgvfs.base
1395 self.path = hgvfs.base
1396 self.requirements = requirements
1396 self.requirements = requirements
1397 self.nodeconstants = sha1nodeconstants
1397 self.nodeconstants = sha1nodeconstants
1398 self.nullid = self.nodeconstants.nullid
1398 self.nullid = self.nodeconstants.nullid
1399 self.supported = supportedrequirements
1399 self.supported = supportedrequirements
1400 self.sharedpath = sharedpath
1400 self.sharedpath = sharedpath
1401 self.store = store
1401 self.store = store
1402 self.cachevfs = cachevfs
1402 self.cachevfs = cachevfs
1403 self.wcachevfs = wcachevfs
1403 self.wcachevfs = wcachevfs
1404 self.features = features
1404 self.features = features
1405
1405
1406 self.filtername = None
1406 self.filtername = None
1407
1407
1408 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1408 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1409 b'devel', b'check-locks'
1409 b'devel', b'check-locks'
1410 ):
1410 ):
1411 self.vfs.audit = self._getvfsward(self.vfs.audit)
1411 self.vfs.audit = self._getvfsward(self.vfs.audit)
1412 # A list of callbacks to shape the phase if no data were found.
1412 # A list of callbacks to shape the phase if no data were found.
1413 # Callbacks are of the form: func(repo, roots) --> processed root.
1413 # Callbacks are of the form: func(repo, roots) --> processed root.
1414 # This list is to be filled by extensions during repo setup
1414 # This list is to be filled by extensions during repo setup
1415 self._phasedefaults = []
1415 self._phasedefaults = []
1416
1416
1417 color.setup(self.ui)
1417 color.setup(self.ui)
1418
1418
1419 self.spath = self.store.path
1419 self.spath = self.store.path
1420 self.svfs = self.store.vfs
1420 self.svfs = self.store.vfs
1421 self.sjoin = self.store.join
1421 self.sjoin = self.store.join
1422 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1422 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1423 b'devel', b'check-locks'
1423 b'devel', b'check-locks'
1424 ):
1424 ):
1425 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1425 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1426 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1426 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1427 else: # standard vfs
1427 else: # standard vfs
1428 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1428 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1429
1429
1430 self._dirstatevalidatewarned = False
1430 self._dirstatevalidatewarned = False
1431
1431
1432 self._branchcaches = branchmap.BranchMapCache()
1432 self._branchcaches = branchmap.BranchMapCache()
1433 self._revbranchcache = None
1433 self._revbranchcache = None
1434 self._filterpats = {}
1434 self._filterpats = {}
1435 self._datafilters = {}
1435 self._datafilters = {}
1436 self._transref = self._lockref = self._wlockref = None
1436 self._transref = self._lockref = self._wlockref = None
1437
1437
1438 # A cache for various files under .hg/ that tracks file changes,
1438 # A cache for various files under .hg/ that tracks file changes,
1439 # (used by the filecache decorator)
1439 # (used by the filecache decorator)
1440 #
1440 #
1441 # Maps a property name to its util.filecacheentry
1441 # Maps a property name to its util.filecacheentry
1442 self._filecache = {}
1442 self._filecache = {}
1443
1443
1444 # holds sets of revisions to be filtered
1444 # holds sets of revisions to be filtered
1445 # should be cleared when something might have changed the filter value:
1445 # should be cleared when something might have changed the filter value:
1446 # - new changesets,
1446 # - new changesets,
1447 # - phase change,
1447 # - phase change,
1448 # - new obsolescence marker,
1448 # - new obsolescence marker,
1449 # - working directory parent change,
1449 # - working directory parent change,
1450 # - bookmark changes
1450 # - bookmark changes
1451 self.filteredrevcache = {}
1451 self.filteredrevcache = {}
1452
1452
1453 # post-dirstate-status hooks
1453 # post-dirstate-status hooks
1454 self._postdsstatus = []
1454 self._postdsstatus = []
1455
1455
1456 # generic mapping between names and nodes
1456 # generic mapping between names and nodes
1457 self.names = namespaces.namespaces()
1457 self.names = namespaces.namespaces()
1458
1458
1459 # Key to signature value.
1459 # Key to signature value.
1460 self._sparsesignaturecache = {}
1460 self._sparsesignaturecache = {}
1461 # Signature to cached matcher instance.
1461 # Signature to cached matcher instance.
1462 self._sparsematchercache = {}
1462 self._sparsematchercache = {}
1463
1463
1464 self._extrafilterid = repoview.extrafilter(ui)
1464 self._extrafilterid = repoview.extrafilter(ui)
1465
1465
1466 self.filecopiesmode = None
1466 self.filecopiesmode = None
1467 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1467 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1468 self.filecopiesmode = b'changeset-sidedata'
1468 self.filecopiesmode = b'changeset-sidedata'
1469
1469
1470 self._wanted_sidedata = set()
1470 self._wanted_sidedata = set()
1471 self._sidedata_computers = {}
1471 self._sidedata_computers = {}
1472 sidedatamod.set_sidedata_spec_for_repo(self)
1472 sidedatamod.set_sidedata_spec_for_repo(self)
1473
1473
1474 def _getvfsward(self, origfunc):
1474 def _getvfsward(self, origfunc):
1475 """build a ward for self.vfs"""
1475 """build a ward for self.vfs"""
1476 rref = weakref.ref(self)
1476 rref = weakref.ref(self)
1477
1477
1478 def checkvfs(path, mode=None):
1478 def checkvfs(path, mode=None):
1479 ret = origfunc(path, mode=mode)
1479 ret = origfunc(path, mode=mode)
1480 repo = rref()
1480 repo = rref()
1481 if (
1481 if (
1482 repo is None
1482 repo is None
1483 or not util.safehasattr(repo, b'_wlockref')
1483 or not util.safehasattr(repo, b'_wlockref')
1484 or not util.safehasattr(repo, b'_lockref')
1484 or not util.safehasattr(repo, b'_lockref')
1485 ):
1485 ):
1486 return
1486 return
1487 if mode in (None, b'r', b'rb'):
1487 if mode in (None, b'r', b'rb'):
1488 return
1488 return
1489 if path.startswith(repo.path):
1489 if path.startswith(repo.path):
1490 # truncate name relative to the repository (.hg)
1490 # truncate name relative to the repository (.hg)
1491 path = path[len(repo.path) + 1 :]
1491 path = path[len(repo.path) + 1 :]
1492 if path.startswith(b'cache/'):
1492 if path.startswith(b'cache/'):
1493 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1493 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1494 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1494 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1495 # path prefixes covered by 'lock'
1495 # path prefixes covered by 'lock'
1496 vfs_path_prefixes = (
1496 vfs_path_prefixes = (
1497 b'journal.',
1497 b'journal.',
1498 b'undo.',
1498 b'undo.',
1499 b'strip-backup/',
1499 b'strip-backup/',
1500 b'cache/',
1500 b'cache/',
1501 )
1501 )
1502 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1502 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1503 if repo._currentlock(repo._lockref) is None:
1503 if repo._currentlock(repo._lockref) is None:
1504 repo.ui.develwarn(
1504 repo.ui.develwarn(
1505 b'write with no lock: "%s"' % path,
1505 b'write with no lock: "%s"' % path,
1506 stacklevel=3,
1506 stacklevel=3,
1507 config=b'check-locks',
1507 config=b'check-locks',
1508 )
1508 )
1509 elif repo._currentlock(repo._wlockref) is None:
1509 elif repo._currentlock(repo._wlockref) is None:
1510 # rest of vfs files are covered by 'wlock'
1510 # rest of vfs files are covered by 'wlock'
1511 #
1511 #
1512 # exclude special files
1512 # exclude special files
1513 for prefix in self._wlockfreeprefix:
1513 for prefix in self._wlockfreeprefix:
1514 if path.startswith(prefix):
1514 if path.startswith(prefix):
1515 return
1515 return
1516 repo.ui.develwarn(
1516 repo.ui.develwarn(
1517 b'write with no wlock: "%s"' % path,
1517 b'write with no wlock: "%s"' % path,
1518 stacklevel=3,
1518 stacklevel=3,
1519 config=b'check-locks',
1519 config=b'check-locks',
1520 )
1520 )
1521 return ret
1521 return ret
1522
1522
1523 return checkvfs
1523 return checkvfs
1524
1524
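# [Editor's illustration] The ward pattern above in miniature: wrap a callable
# with extra checks while keeping only a weak reference to the owner, so the
# wrapper does not create a reference cycle and degrades gracefully once the
# owner has been garbage collected. Names here are illustrative only.
import weakref

def make_ward(owner, origfunc):
    ref = weakref.ref(owner)

    def checked(path, mode=None):
        ret = origfunc(path, mode)
        live_owner = ref()
        if live_owner is None:      # owner already collected: nothing to check
            return ret
        # ... develwarn-style checks against ``live_owner`` would go here ...
        return ret

    return checked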
1525 def _getsvfsward(self, origfunc):
1525 def _getsvfsward(self, origfunc):
1526 """build a ward for self.svfs"""
1526 """build a ward for self.svfs"""
1527 rref = weakref.ref(self)
1527 rref = weakref.ref(self)
1528
1528
1529 def checksvfs(path, mode=None):
1529 def checksvfs(path, mode=None):
1530 ret = origfunc(path, mode=mode)
1530 ret = origfunc(path, mode=mode)
1531 repo = rref()
1531 repo = rref()
1532 if repo is None or not util.safehasattr(repo, b'_lockref'):
1532 if repo is None or not util.safehasattr(repo, b'_lockref'):
1533 return
1533 return
1534 if mode in (None, b'r', b'rb'):
1534 if mode in (None, b'r', b'rb'):
1535 return
1535 return
1536 if path.startswith(repo.sharedpath):
1536 if path.startswith(repo.sharedpath):
1537 # truncate name relative to the repository (.hg)
1537 # truncate name relative to the repository (.hg)
1538 path = path[len(repo.sharedpath) + 1 :]
1538 path = path[len(repo.sharedpath) + 1 :]
1539 if repo._currentlock(repo._lockref) is None:
1539 if repo._currentlock(repo._lockref) is None:
1540 repo.ui.develwarn(
1540 repo.ui.develwarn(
1541 b'write with no lock: "%s"' % path, stacklevel=4
1541 b'write with no lock: "%s"' % path, stacklevel=4
1542 )
1542 )
1543 return ret
1543 return ret
1544
1544
1545 return checksvfs
1545 return checksvfs
1546
1546
1547 def close(self):
1547 def close(self):
1548 self._writecaches()
1548 self._writecaches()
1549
1549
1550 def _writecaches(self):
1550 def _writecaches(self):
1551 if self._revbranchcache:
1551 if self._revbranchcache:
1552 self._revbranchcache.write()
1552 self._revbranchcache.write()
1553
1553
1554 def _restrictcapabilities(self, caps):
1554 def _restrictcapabilities(self, caps):
1555 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1555 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1556 caps = set(caps)
1556 caps = set(caps)
1557 capsblob = bundle2.encodecaps(
1557 capsblob = bundle2.encodecaps(
1558 bundle2.getrepocaps(self, role=b'client')
1558 bundle2.getrepocaps(self, role=b'client')
1559 )
1559 )
1560 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1560 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1561 if self.ui.configbool(b'experimental', b'narrow'):
1561 if self.ui.configbool(b'experimental', b'narrow'):
1562 caps.add(wireprototypes.NARROWCAP)
1562 caps.add(wireprototypes.NARROWCAP)
1563 return caps
1563 return caps
1564
1564
1565 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1565 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1566 # self -> auditor -> self._checknested -> self
1566 # self -> auditor -> self._checknested -> self
1567
1567
1568 @property
1568 @property
1569 def auditor(self):
1569 def auditor(self):
1570 # This is only used by context.workingctx.match in order to
1570 # This is only used by context.workingctx.match in order to
1571 # detect files in subrepos.
1571 # detect files in subrepos.
1572 return pathutil.pathauditor(self.root, callback=self._checknested)
1572 return pathutil.pathauditor(self.root, callback=self._checknested)
1573
1573
1574 @property
1574 @property
1575 def nofsauditor(self):
1575 def nofsauditor(self):
1576 # This is only used by context.basectx.match in order to detect
1576 # This is only used by context.basectx.match in order to detect
1577 # files in subrepos.
1577 # files in subrepos.
1578 return pathutil.pathauditor(
1578 return pathutil.pathauditor(
1579 self.root, callback=self._checknested, realfs=False, cached=True
1579 self.root, callback=self._checknested, realfs=False, cached=True
1580 )
1580 )
1581
1581
1582 def _checknested(self, path):
1582 def _checknested(self, path):
1583 """Determine if path is a legal nested repository."""
1583 """Determine if path is a legal nested repository."""
1584 if not path.startswith(self.root):
1584 if not path.startswith(self.root):
1585 return False
1585 return False
1586 subpath = path[len(self.root) + 1 :]
1586 subpath = path[len(self.root) + 1 :]
1587 normsubpath = util.pconvert(subpath)
1587 normsubpath = util.pconvert(subpath)
1588
1588
1589 # XXX: Checking against the current working copy is wrong in
1589 # XXX: Checking against the current working copy is wrong in
1590 # the sense that it can reject things like
1590 # the sense that it can reject things like
1591 #
1591 #
1592 # $ hg cat -r 10 sub/x.txt
1592 # $ hg cat -r 10 sub/x.txt
1593 #
1593 #
1594 # if sub/ is no longer a subrepository in the working copy
1594 # if sub/ is no longer a subrepository in the working copy
1595 # parent revision.
1595 # parent revision.
1596 #
1596 #
1597 # However, it can of course also allow things that would have
1597 # However, it can of course also allow things that would have
1598 # been rejected before, such as the above cat command if sub/
1598 # been rejected before, such as the above cat command if sub/
1599 # is a subrepository now, but was a normal directory before.
1599 # is a subrepository now, but was a normal directory before.
1600 # The old path auditor would have rejected by mistake since it
1600 # The old path auditor would have rejected by mistake since it
1601 # panics when it sees sub/.hg/.
1601 # panics when it sees sub/.hg/.
1602 #
1602 #
1603 # All in all, checking against the working copy seems sensible
1603 # All in all, checking against the working copy seems sensible
1604 # since we want to prevent access to nested repositories on
1604 # since we want to prevent access to nested repositories on
1605 # the filesystem *now*.
1605 # the filesystem *now*.
1606 ctx = self[None]
1606 ctx = self[None]
1607 parts = util.splitpath(subpath)
1607 parts = util.splitpath(subpath)
1608 while parts:
1608 while parts:
1609 prefix = b'/'.join(parts)
1609 prefix = b'/'.join(parts)
1610 if prefix in ctx.substate:
1610 if prefix in ctx.substate:
1611 if prefix == normsubpath:
1611 if prefix == normsubpath:
1612 return True
1612 return True
1613 else:
1613 else:
1614 sub = ctx.sub(prefix)
1614 sub = ctx.sub(prefix)
1615 return sub.checknested(subpath[len(prefix) + 1 :])
1615 return sub.checknested(subpath[len(prefix) + 1 :])
1616 else:
1616 else:
1617 parts.pop()
1617 parts.pop()
1618 return False
1618 return False
1619
1619
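# [Editor's illustration] The prefix walk used above, as a standalone sketch:
# starting from the full path, drop one trailing component at a time until a
# known subrepository prefix is found. ``substate`` is a plain set here; the
# real code consults the working context's substate and may recurse into the
# subrepository for deeper paths.
def _nested_prefix_sketch(subpath, substate):
    parts = subpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            return prefix
        parts.pop()
    return None

# _nested_prefix_sketch('sub/dir/file.txt', {'sub'}) -> 'sub'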
1620 def peer(self):
1620 def peer(self):
1621 return localpeer(self) # not cached to avoid reference cycle
1621 return localpeer(self) # not cached to avoid reference cycle
1622
1622
1623 def unfiltered(self):
1623 def unfiltered(self):
1624 """Return unfiltered version of the repository
1624 """Return unfiltered version of the repository
1625
1625
1626 Intended to be overwritten by filtered repo."""
1626 Intended to be overwritten by filtered repo."""
1627 return self
1627 return self
1628
1628
1629 def filtered(self, name, visibilityexceptions=None):
1629 def filtered(self, name, visibilityexceptions=None):
1630 """Return a filtered version of a repository
1630 """Return a filtered version of a repository
1631
1631
1632 The `name` parameter is the identifier of the requested view. This
1632 The `name` parameter is the identifier of the requested view. This
1633 will return a repoview object set "exactly" to the specified view.
1633 will return a repoview object set "exactly" to the specified view.
1634
1634
1635 This function does not apply recursive filtering to a repository. For
1635 This function does not apply recursive filtering to a repository. For
1636 example calling `repo.filtered("served")` will return a repoview using
1636 example calling `repo.filtered("served")` will return a repoview using
1637 the "served" view, regardless of the initial view used by `repo`.
1637 the "served" view, regardless of the initial view used by `repo`.
1638
1638
1639 In other words, there is always only one level of `repoview` "filtering".
1639 In other words, there is always only one level of `repoview` "filtering".
1640 """
1640 """
1641 if self._extrafilterid is not None and b'%' not in name:
1641 if self._extrafilterid is not None and b'%' not in name:
1642 name = name + b'%' + self._extrafilterid
1642 name = name + b'%' + self._extrafilterid
1643
1643
1644 cls = repoview.newtype(self.unfiltered().__class__)
1644 cls = repoview.newtype(self.unfiltered().__class__)
1645 return cls(self, name, visibilityexceptions)
1645 return cls(self, name, visibilityexceptions)
1646
1646
1647 @mixedrepostorecache(
1647 @mixedrepostorecache(
1648 (b'bookmarks', b'plain'),
1648 (b'bookmarks', b'plain'),
1649 (b'bookmarks.current', b'plain'),
1649 (b'bookmarks.current', b'plain'),
1650 (b'bookmarks', b''),
1650 (b'bookmarks', b''),
1651 (b'00changelog.i', b''),
1651 (b'00changelog.i', b''),
1652 )
1652 )
1653 def _bookmarks(self):
1653 def _bookmarks(self):
1654 # Since the multiple files involved in the transaction cannot be
1654 # Since the multiple files involved in the transaction cannot be
1655 # written atomically (with the current repository format), there is a race
1655 # written atomically (with the current repository format), there is a race
1656 # condition here.
1656 # condition here.
1657 #
1657 #
1658 # 1) changelog content A is read
1658 # 1) changelog content A is read
1659 # 2) outside transaction update changelog to content B
1659 # 2) outside transaction update changelog to content B
1660 # 3) outside transaction update bookmark file referring to content B
1660 # 3) outside transaction update bookmark file referring to content B
1661 # 4) bookmarks file content is read and filtered against changelog-A
1661 # 4) bookmarks file content is read and filtered against changelog-A
1662 #
1662 #
1663 # When this happens, bookmarks against nodes missing from A are dropped.
1663 # When this happens, bookmarks against nodes missing from A are dropped.
1664 #
1664 #
1665 # Having this happen during a read is not great, but it becomes worse
1665 # Having this happen during a read is not great, but it becomes worse
1666 # when it happens during a write, because the bookmarks pointing to the
1666 # when it happens during a write, because the bookmarks pointing to the
1667 # "unknown" nodes will be dropped for good. However, writes happen within
1667 # "unknown" nodes will be dropped for good. However, writes happen within
1668 # locks. This locking makes it possible to have a race-free, consistent read.
1668 # locks. This locking makes it possible to have a race-free, consistent read.
1669 # For this purpose, data read from disk before locking is
1669 # For this purpose, data read from disk before locking is
1670 # "invalidated" right after the locks are taken. These invalidations are
1670 # "invalidated" right after the locks are taken. These invalidations are
1671 # "light": the `filecache` mechanism keeps the data in memory and will
1671 # "light": the `filecache` mechanism keeps the data in memory and will
1672 # reuse it if the underlying files did not change. Not parsing the
1672 # reuse it if the underlying files did not change. Not parsing the
1673 # same data multiple times helps performance.
1673 # same data multiple times helps performance.
1674 #
1674 #
1675 # Unfortunately, in the case described above, the files tracked by the
1675 # Unfortunately, in the case described above, the files tracked by the
1676 # bookmarks file cache might not have changed, but the in-memory
1676 # bookmarks file cache might not have changed, but the in-memory
1677 # content is still "wrong" because we used an older changelog content
1677 # content is still "wrong" because we used an older changelog content
1678 # to process the on-disk data. So after locking, the changelog would be
1678 # to process the on-disk data. So after locking, the changelog would be
1679 # refreshed but `_bookmarks` would be preserved.
1679 # refreshed but `_bookmarks` would be preserved.
1680 # Adding `00changelog.i` to the list of tracked files is not
1680 # Adding `00changelog.i` to the list of tracked files is not
1681 # enough, because at the time we build the content for `_bookmarks` in
1681 # enough, because at the time we build the content for `_bookmarks` in
1682 # (4), the changelog file has already diverged from the content used
1682 # (4), the changelog file has already diverged from the content used
1683 # for loading `changelog` in (1)
1683 # for loading `changelog` in (1)
1684 #
1684 #
1685 # To prevent the issue, we force the changelog to be explicitly
1685 # To prevent the issue, we force the changelog to be explicitly
1686 # reloaded while computing `_bookmarks`. The data race can still happen
1686 # reloaded while computing `_bookmarks`. The data race can still happen
1687 # without the lock (with a narrower window), but it would no longer go
1687 # without the lock (with a narrower window), but it would no longer go
1688 # undetected during the lock time refresh.
1688 # undetected during the lock time refresh.
1689 #
1689 #
1690 # The new schedule is as follows:
1690 # The new schedule is as follows:
1691 #
1691 #
1692 # 1) filecache logic detects that `_bookmarks` needs to be computed
1692 # 1) filecache logic detects that `_bookmarks` needs to be computed
1693 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1693 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1694 # 3) We force `changelog` filecache to be tested
1694 # 3) We force `changelog` filecache to be tested
1695 # 4) cachestat for `changelog` is captured (for changelog)
1695 # 4) cachestat for `changelog` is captured (for changelog)
1696 # 5) `_bookmarks` is computed and cached
1696 # 5) `_bookmarks` is computed and cached
1697 #
1697 #
1698 # The step in (3) ensures we have a changelog at least as recent as the
1698 # The step in (3) ensures we have a changelog at least as recent as the
1699 # cache stat computed in (1). As a result at locking time:
1699 # cache stat computed in (1). As a result at locking time:
1700 # * if the changelog did not change since (1) -> we can reuse the data
1700 # * if the changelog did not change since (1) -> we can reuse the data
1701 # * otherwise -> the bookmarks get refreshed.
1701 # * otherwise -> the bookmarks get refreshed.
1702 self._refreshchangelog()
1702 self._refreshchangelog()
1703 return bookmarks.bmstore(self)
1703 return bookmarks.bmstore(self)
1704
1704
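# [Editor's illustration] The schedule described above is an instance of a
# generic "read, take the lock, re-validate" pattern. A standalone sketch,
# with file mtimes standing in for the richer filecache/cachestat machinery:
import os

class _CachedRead(object):
    def __init__(self, path):
        self.path = path
        self._stamp = None
        self._data = None

    def read(self):
        stamp = os.stat(self.path).st_mtime_ns
        if stamp != self._stamp:      # file changed since the last read
            with open(self.path, 'rb') as fh:
                self._data = fh.read()
            self._stamp = stamp
        return self._data

# Before relying on the cached value inside a critical section, call read()
# again after the lock is taken so that concurrent writers are observed.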
1705 def _refreshchangelog(self):
1705 def _refreshchangelog(self):
1706 """make sure the in memory changelog match the on-disk one"""
1706 """make sure the in memory changelog match the on-disk one"""
1707 if 'changelog' in vars(self) and self.currenttransaction() is None:
1707 if 'changelog' in vars(self) and self.currenttransaction() is None:
1708 del self.changelog
1708 del self.changelog
1709
1709
1710 @property
1710 @property
1711 def _activebookmark(self):
1711 def _activebookmark(self):
1712 return self._bookmarks.active
1712 return self._bookmarks.active
1713
1713
1714 # _phasesets depend on changelog. what we need is to call
1714 # _phasesets depend on changelog. what we need is to call
1715 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1715 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1716 # can't be easily expressed in filecache mechanism.
1716 # can't be easily expressed in filecache mechanism.
1717 @storecache(b'phaseroots', b'00changelog.i')
1717 @storecache(b'phaseroots', b'00changelog.i')
1718 def _phasecache(self):
1718 def _phasecache(self):
1719 return phases.phasecache(self, self._phasedefaults)
1719 return phases.phasecache(self, self._phasedefaults)
1720
1720
1721 @storecache(b'obsstore')
1721 @storecache(b'obsstore')
1722 def obsstore(self):
1722 def obsstore(self):
1723 return obsolete.makestore(self.ui, self)
1723 return obsolete.makestore(self.ui, self)
1724
1724
1725 @changelogcache()
1725 @changelogcache()
1726 def changelog(repo):
1726 def changelog(repo):
1727 # load dirstate before changelog to avoid a race; see issue6303
1727 # load dirstate before changelog to avoid a race; see issue6303
1728 repo.dirstate.prefetch_parents()
1728 repo.dirstate.prefetch_parents()
1729 return repo.store.changelog(
1729 return repo.store.changelog(
1730 txnutil.mayhavepending(repo.root),
1730 txnutil.mayhavepending(repo.root),
1731 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1731 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1732 )
1732 )
1733
1733
1734 @manifestlogcache()
1734 @manifestlogcache()
1735 def manifestlog(self):
1735 def manifestlog(self):
1736 return self.store.manifestlog(self, self._storenarrowmatch)
1736 return self.store.manifestlog(self, self._storenarrowmatch)
1737
1737
1738 @repofilecache(b'dirstate')
1738 @repofilecache(b'dirstate')
1739 def dirstate(self):
1739 def dirstate(self):
1740 return self._makedirstate()
1740 return self._makedirstate()
1741
1741
1742 def _makedirstate(self):
1742 def _makedirstate(self):
1743 """Extension point for wrapping the dirstate per-repo."""
1743 """Extension point for wrapping the dirstate per-repo."""
1744 sparsematchfn = lambda: sparse.matcher(self)
1744 sparsematchfn = lambda: sparse.matcher(self)
1745 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1745 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1746 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1746 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1747 use_dirstate_v2 = v2_req in self.requirements
1747 use_dirstate_v2 = v2_req in self.requirements
1748 use_tracked_hint = th in self.requirements
1748 use_tracked_hint = th in self.requirements
1749
1749
1750 return dirstate.dirstate(
1750 return dirstate.dirstate(
1751 self.vfs,
1751 self.vfs,
1752 self.ui,
1752 self.ui,
1753 self.root,
1753 self.root,
1754 self._dirstatevalidate,
1754 self._dirstatevalidate,
1755 sparsematchfn,
1755 sparsematchfn,
1756 self.nodeconstants,
1756 self.nodeconstants,
1757 use_dirstate_v2,
1757 use_dirstate_v2,
1758 use_tracked_hint=use_tracked_hint,
1758 use_tracked_hint=use_tracked_hint,
1759 )
1759 )
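# The method above is the documented extension point; a minimal sketch of
# how an extension might wrap it per-repo (the `reposetup()` hook and the
# class-swap pattern are standard Mercurial extension idioms, while the
# class name `myrepo` and the customisation are hypothetical):
#
#     def reposetup(ui, repo):
#         class myrepo(repo.__class__):
#             def _makedirstate(self):
#                 ds = super(myrepo, self)._makedirstate()
#                 # tweak the dirstate instance here before returning it
#                 return ds
#         repo.__class__ = myrepo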
1760
1760
1761 def _dirstatevalidate(self, node):
1761 def _dirstatevalidate(self, node):
1762 try:
1762 try:
1763 self.changelog.rev(node)
1763 self.changelog.rev(node)
1764 return node
1764 return node
1765 except error.LookupError:
1765 except error.LookupError:
1766 if not self._dirstatevalidatewarned:
1766 if not self._dirstatevalidatewarned:
1767 self._dirstatevalidatewarned = True
1767 self._dirstatevalidatewarned = True
1768 self.ui.warn(
1768 self.ui.warn(
1769 _(b"warning: ignoring unknown working parent %s!\n")
1769 _(b"warning: ignoring unknown working parent %s!\n")
1770 % short(node)
1770 % short(node)
1771 )
1771 )
1772 return self.nullid
1772 return self.nullid
1773
1773
1774 @storecache(narrowspec.FILENAME)
1774 @storecache(narrowspec.FILENAME)
1775 def narrowpats(self):
1775 def narrowpats(self):
1776 """matcher patterns for this repository's narrowspec
1776 """matcher patterns for this repository's narrowspec
1777
1777
1778 A tuple of (includes, excludes).
1778 A tuple of (includes, excludes).
1779 """
1779 """
1780 return narrowspec.load(self)
1780 return narrowspec.load(self)
1781
1781
1782 @storecache(narrowspec.FILENAME)
1782 @storecache(narrowspec.FILENAME)
1783 def _storenarrowmatch(self):
1783 def _storenarrowmatch(self):
1784 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1784 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1785 return matchmod.always()
1785 return matchmod.always()
1786 include, exclude = self.narrowpats
1786 include, exclude = self.narrowpats
1787 return narrowspec.match(self.root, include=include, exclude=exclude)
1787 return narrowspec.match(self.root, include=include, exclude=exclude)
1788
1788
1789 @storecache(narrowspec.FILENAME)
1789 @storecache(narrowspec.FILENAME)
1790 def _narrowmatch(self):
1790 def _narrowmatch(self):
1791 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1791 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1792 return matchmod.always()
1792 return matchmod.always()
1793 narrowspec.checkworkingcopynarrowspec(self)
1793 narrowspec.checkworkingcopynarrowspec(self)
1794 include, exclude = self.narrowpats
1794 include, exclude = self.narrowpats
1795 return narrowspec.match(self.root, include=include, exclude=exclude)
1795 return narrowspec.match(self.root, include=include, exclude=exclude)
1796
1796
1797 def narrowmatch(self, match=None, includeexact=False):
1798 """matcher corresponding to the repo's narrowspec
1799
1800 If `match` is given, then that will be intersected with the narrow
1801 matcher.
1802
1803 If `includeexact` is True, then any exact matches from `match` will
1804 be included even if they're outside the narrowspec.
1805 """
1806 if match:
1806 if match:
1807 if includeexact and not self._narrowmatch.always():
1807 if includeexact and not self._narrowmatch.always():
1808 # do not exclude explicitly-specified paths so that they can
1808 # do not exclude explicitly-specified paths so that they can
1809 # be warned later on
1809 # be warned later on
1810 em = matchmod.exact(match.files())
1810 em = matchmod.exact(match.files())
1811 nm = matchmod.unionmatcher([self._narrowmatch, em])
1811 nm = matchmod.unionmatcher([self._narrowmatch, em])
1812 return matchmod.intersectmatchers(match, nm)
1812 return matchmod.intersectmatchers(match, nm)
1813 return matchmod.intersectmatchers(match, self._narrowmatch)
1813 return matchmod.intersectmatchers(match, self._narrowmatch)
1814 return self._narrowmatch
1814 return self._narrowmatch
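# A small illustrative sketch of the API above (assuming `repo` is a loaded
# repository and `m` is any matcher, for instance one built with
# scmutil.match(); the pattern and path are hypothetical):
#
#     m = scmutil.match(repo[None], [b'path:src'])
#     nm = repo.narrowmatch(m, includeexact=True)
#     if nm(b'src/module.py'):
#         ...  # the file is both requested and inside the narrowspec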
1815
1815
1816 def setnarrowpats(self, newincludes, newexcludes):
1816 def setnarrowpats(self, newincludes, newexcludes):
1817 narrowspec.save(self, newincludes, newexcludes)
1817 narrowspec.save(self, newincludes, newexcludes)
1818 self.invalidate(clearfilecache=True)
1818 self.invalidate(clearfilecache=True)
1819
1819
1820 @unfilteredpropertycache
1820 @unfilteredpropertycache
1821 def _quick_access_changeid_null(self):
1821 def _quick_access_changeid_null(self):
1822 return {
1822 return {
1823 b'null': (nullrev, self.nodeconstants.nullid),
1823 b'null': (nullrev, self.nodeconstants.nullid),
1824 nullrev: (nullrev, self.nodeconstants.nullid),
1824 nullrev: (nullrev, self.nodeconstants.nullid),
1825 self.nullid: (nullrev, self.nullid),
1825 self.nullid: (nullrev, self.nullid),
1826 }
1826 }
1827
1827
1828 @unfilteredpropertycache
1828 @unfilteredpropertycache
1829 def _quick_access_changeid_wc(self):
1829 def _quick_access_changeid_wc(self):
1830 # also fast-path access to the working copy parents
1831 # however, only do it for filters that ensure the wc is visible.
1832 quick = self._quick_access_changeid_null.copy()
1832 quick = self._quick_access_changeid_null.copy()
1833 cl = self.unfiltered().changelog
1833 cl = self.unfiltered().changelog
1834 for node in self.dirstate.parents():
1834 for node in self.dirstate.parents():
1835 if node == self.nullid:
1835 if node == self.nullid:
1836 continue
1836 continue
1837 rev = cl.index.get_rev(node)
1837 rev = cl.index.get_rev(node)
1838 if rev is None:
1838 if rev is None:
1839 # unknown working copy parent case:
1839 # unknown working copy parent case:
1840 #
1840 #
1841 # skip the fast path and let higher code deal with it
1841 # skip the fast path and let higher code deal with it
1842 continue
1842 continue
1843 pair = (rev, node)
1843 pair = (rev, node)
1844 quick[rev] = pair
1844 quick[rev] = pair
1845 quick[node] = pair
1845 quick[node] = pair
1846 # also add the parents of the parents
1846 # also add the parents of the parents
1847 for r in cl.parentrevs(rev):
1847 for r in cl.parentrevs(rev):
1848 if r == nullrev:
1848 if r == nullrev:
1849 continue
1849 continue
1850 n = cl.node(r)
1850 n = cl.node(r)
1851 pair = (r, n)
1851 pair = (r, n)
1852 quick[r] = pair
1852 quick[r] = pair
1853 quick[n] = pair
1853 quick[n] = pair
1854 p1node = self.dirstate.p1()
1854 p1node = self.dirstate.p1()
1855 if p1node != self.nullid:
1855 if p1node != self.nullid:
1856 quick[b'.'] = quick[p1node]
1856 quick[b'.'] = quick[p1node]
1857 return quick
1857 return quick
1858
1858
1859 @unfilteredmethod
1859 @unfilteredmethod
1860 def _quick_access_changeid_invalidate(self):
1860 def _quick_access_changeid_invalidate(self):
1861 if '_quick_access_changeid_wc' in vars(self):
1861 if '_quick_access_changeid_wc' in vars(self):
1862 del self.__dict__['_quick_access_changeid_wc']
1862 del self.__dict__['_quick_access_changeid_wc']
1863
1863
1864 @property
1864 @property
1865 def _quick_access_changeid(self):
1866 """a helper dictionary for __getitem__ calls
1867
1868 This contains the symbols we can recognise right away without
1869 further processing.
1870 """
1871 if self.filtername in repoview.filter_has_wc:
1871 if self.filtername in repoview.filter_has_wc:
1872 return self._quick_access_changeid_wc
1872 return self._quick_access_changeid_wc
1873 return self._quick_access_changeid_null
1873 return self._quick_access_changeid_null
1874
1874
1875 def __getitem__(self, changeid):
1875 def __getitem__(self, changeid):
1876 # dealing with special cases
1876 # dealing with special cases
1877 if changeid is None:
1877 if changeid is None:
1878 return context.workingctx(self)
1878 return context.workingctx(self)
1879 if isinstance(changeid, context.basectx):
1879 if isinstance(changeid, context.basectx):
1880 return changeid
1880 return changeid
1881
1881
1882 # dealing with multiple revisions
1882 # dealing with multiple revisions
1883 if isinstance(changeid, slice):
1883 if isinstance(changeid, slice):
1884 # wdirrev isn't contiguous so the slice shouldn't include it
1884 # wdirrev isn't contiguous so the slice shouldn't include it
1885 return [
1885 return [
1886 self[i]
1886 self[i]
1887 for i in pycompat.xrange(*changeid.indices(len(self)))
1887 for i in pycompat.xrange(*changeid.indices(len(self)))
1888 if i not in self.changelog.filteredrevs
1888 if i not in self.changelog.filteredrevs
1889 ]
1889 ]
1890
1890
1891 # dealing with some special values
1891 # dealing with some special values
1892 quick_access = self._quick_access_changeid.get(changeid)
1892 quick_access = self._quick_access_changeid.get(changeid)
1893 if quick_access is not None:
1893 if quick_access is not None:
1894 rev, node = quick_access
1894 rev, node = quick_access
1895 return context.changectx(self, rev, node, maybe_filtered=False)
1895 return context.changectx(self, rev, node, maybe_filtered=False)
1896 if changeid == b'tip':
1896 if changeid == b'tip':
1897 node = self.changelog.tip()
1897 node = self.changelog.tip()
1898 rev = self.changelog.rev(node)
1898 rev = self.changelog.rev(node)
1899 return context.changectx(self, rev, node)
1899 return context.changectx(self, rev, node)
1900
1900
1901 # dealing with arbitrary values
1901 # dealing with arbitrary values
1902 try:
1902 try:
1903 if isinstance(changeid, int):
1903 if isinstance(changeid, int):
1904 node = self.changelog.node(changeid)
1904 node = self.changelog.node(changeid)
1905 rev = changeid
1905 rev = changeid
1906 elif changeid == b'.':
1906 elif changeid == b'.':
1907 # this is a hack to delay/avoid loading obsmarkers
1907 # this is a hack to delay/avoid loading obsmarkers
1908 # when we know that '.' won't be hidden
1908 # when we know that '.' won't be hidden
1909 node = self.dirstate.p1()
1909 node = self.dirstate.p1()
1910 rev = self.unfiltered().changelog.rev(node)
1910 rev = self.unfiltered().changelog.rev(node)
1911 elif len(changeid) == self.nodeconstants.nodelen:
1911 elif len(changeid) == self.nodeconstants.nodelen:
1912 try:
1912 try:
1913 node = changeid
1913 node = changeid
1914 rev = self.changelog.rev(changeid)
1914 rev = self.changelog.rev(changeid)
1915 except error.FilteredLookupError:
1915 except error.FilteredLookupError:
1916 changeid = hex(changeid) # for the error message
1916 changeid = hex(changeid) # for the error message
1917 raise
1917 raise
1918 except LookupError:
1918 except LookupError:
1919 # check if it might have come from damaged dirstate
1919 # check if it might have come from damaged dirstate
1920 #
1920 #
1921 # XXX we could avoid the unfiltered if we had a recognizable
1921 # XXX we could avoid the unfiltered if we had a recognizable
1922 # exception for filtered changeset access
1922 # exception for filtered changeset access
1923 if (
1923 if (
1924 self.local()
1924 self.local()
1925 and changeid in self.unfiltered().dirstate.parents()
1925 and changeid in self.unfiltered().dirstate.parents()
1926 ):
1926 ):
1927 msg = _(b"working directory has unknown parent '%s'!")
1927 msg = _(b"working directory has unknown parent '%s'!")
1928 raise error.Abort(msg % short(changeid))
1928 raise error.Abort(msg % short(changeid))
1929 changeid = hex(changeid) # for the error message
1929 changeid = hex(changeid) # for the error message
1930 raise
1930 raise
1931
1931
1932 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1932 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1933 node = bin(changeid)
1933 node = bin(changeid)
1934 rev = self.changelog.rev(node)
1934 rev = self.changelog.rev(node)
1935 else:
1935 else:
1936 raise error.ProgrammingError(
1936 raise error.ProgrammingError(
1937 b"unsupported changeid '%s' of type %s"
1937 b"unsupported changeid '%s' of type %s"
1938 % (changeid, pycompat.bytestr(type(changeid)))
1938 % (changeid, pycompat.bytestr(type(changeid)))
1939 )
1939 )
1940
1940
1941 return context.changectx(self, rev, node)
1941 return context.changectx(self, rev, node)
1942
1942
1943 except (error.FilteredIndexError, error.FilteredLookupError):
1943 except (error.FilteredIndexError, error.FilteredLookupError):
1944 raise error.FilteredRepoLookupError(
1944 raise error.FilteredRepoLookupError(
1945 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1945 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1946 )
1946 )
1947 except (IndexError, LookupError):
1947 except (IndexError, LookupError):
1948 raise error.RepoLookupError(
1948 raise error.RepoLookupError(
1949 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1949 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1950 )
1950 )
1951 except error.WdirUnsupported:
1951 except error.WdirUnsupported:
1952 return context.workingctx(self)
1952 return context.workingctx(self)
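# Illustrative sketch of the lookup forms accepted above (assuming `repo`
# is a loaded repository and `node` is a binary changelog node):
#
#     repo[None]      # working directory context
#     repo[b'.']      # first parent of the working directory
#     repo[b'tip']    # tip changeset
#     repo[0]         # by integer revision number
#     repo[node]      # by binary node (or a 40-byte hex string)
#     repo[0:3]       # slice -> list of changectx, filtered revs skipped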
1953
1953
1954 def __contains__(self, changeid):
1954 def __contains__(self, changeid):
1955 """True if the given changeid exists"""
1955 """True if the given changeid exists"""
1956 try:
1956 try:
1957 self[changeid]
1957 self[changeid]
1958 return True
1958 return True
1959 except error.RepoLookupError:
1959 except error.RepoLookupError:
1960 return False
1960 return False
1961
1961
1962 def __nonzero__(self):
1962 def __nonzero__(self):
1963 return True
1963 return True
1964
1964
1965 __bool__ = __nonzero__
1965 __bool__ = __nonzero__
1966
1966
1967 def __len__(self):
1967 def __len__(self):
1968 # no need to pay the cost of repoview.changelog
1968 # no need to pay the cost of repoview.changelog
1969 unfi = self.unfiltered()
1969 unfi = self.unfiltered()
1970 return len(unfi.changelog)
1970 return len(unfi.changelog)
1971
1971
1972 def __iter__(self):
1972 def __iter__(self):
1973 return iter(self.changelog)
1973 return iter(self.changelog)
1974
1974
1975 def revs(self, expr, *args):
1975 def revs(self, expr, *args):
1976 """Find revisions matching a revset.
1976 """Find revisions matching a revset.
1977
1977
1978 The revset is specified as a string ``expr`` that may contain
1978 The revset is specified as a string ``expr`` that may contain
1979 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1979 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1980
1980
1981 Revset aliases from the configuration are not expanded. To expand
1981 Revset aliases from the configuration are not expanded. To expand
1982 user aliases, consider calling ``scmutil.revrange()`` or
1982 user aliases, consider calling ``scmutil.revrange()`` or
1983 ``repo.anyrevs([expr], user=True)``.
1983 ``repo.anyrevs([expr], user=True)``.
1984
1984
1985 Returns a smartset.abstractsmartset, which is a list-like interface
1985 Returns a smartset.abstractsmartset, which is a list-like interface
1986 that contains integer revisions.
1986 that contains integer revisions.
1987 """
1987 """
1988 tree = revsetlang.spectree(expr, *args)
1988 tree = revsetlang.spectree(expr, *args)
1989 return revset.makematcher(tree)(self)
1989 return revset.makematcher(tree)(self)
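# Illustrative sketch of the %-formatting mentioned above (see
# revsetlang.formatspec for the full list of specifiers; the revision
# numbers are arbitrary):
#
#     for r in repo.revs(b'ancestors(%d)', 42):
#         ...
#     heads = repo.revs(b'heads(%ld)', [5, 7, 11])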
1990
1990
1991 def set(self, expr, *args):
1991 def set(self, expr, *args):
1992 """Find revisions matching a revset and emit changectx instances.
1992 """Find revisions matching a revset and emit changectx instances.
1993
1993
1994 This is a convenience wrapper around ``revs()`` that iterates the
1994 This is a convenience wrapper around ``revs()`` that iterates the
1995 result and is a generator of changectx instances.
1995 result and is a generator of changectx instances.
1996
1996
1997 Revset aliases from the configuration are not expanded. To expand
1997 Revset aliases from the configuration are not expanded. To expand
1998 user aliases, consider calling ``scmutil.revrange()``.
1998 user aliases, consider calling ``scmutil.revrange()``.
1999 """
1999 """
2000 for r in self.revs(expr, *args):
2000 for r in self.revs(expr, *args):
2001 yield self[r]
2001 yield self[r]
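# Illustrative sketch of iterating contexts with the wrapper above (the
# author name is hypothetical):
#
#     for ctx in repo.set(b'author(%s) and not public()', b'alice'):
#         ...  # ctx is a changectx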
2002
2002
2003 def anyrevs(self, specs, user=False, localalias=None):
2003 def anyrevs(self, specs, user=False, localalias=None):
2004 """Find revisions matching one of the given revsets.
2004 """Find revisions matching one of the given revsets.
2005
2005
2006 Revset aliases from the configuration are not expanded by default. To
2006 Revset aliases from the configuration are not expanded by default. To
2007 expand user aliases, specify ``user=True``. To provide some local
2007 expand user aliases, specify ``user=True``. To provide some local
2008 definitions overriding user aliases, set ``localalias`` to
2008 definitions overriding user aliases, set ``localalias`` to
2009 ``{name: definitionstring}``.
2009 ``{name: definitionstring}``.
2010 """
2010 """
2011 if specs == [b'null']:
2011 if specs == [b'null']:
2012 return revset.baseset([nullrev])
2012 return revset.baseset([nullrev])
2013 if specs == [b'.']:
2013 if specs == [b'.']:
2014 quick_data = self._quick_access_changeid.get(b'.')
2014 quick_data = self._quick_access_changeid.get(b'.')
2015 if quick_data is not None:
2015 if quick_data is not None:
2016 return revset.baseset([quick_data[0]])
2016 return revset.baseset([quick_data[0]])
2017 if user:
2017 if user:
2018 m = revset.matchany(
2018 m = revset.matchany(
2019 self.ui,
2019 self.ui,
2020 specs,
2020 specs,
2021 lookup=revset.lookupfn(self),
2021 lookup=revset.lookupfn(self),
2022 localalias=localalias,
2022 localalias=localalias,
2023 )
2023 )
2024 else:
2024 else:
2025 m = revset.matchany(None, specs, localalias=localalias)
2025 m = revset.matchany(None, specs, localalias=localalias)
2026 return m(self)
2026 return m(self)
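# Illustrative sketch (assuming `repo` is loaded; the local alias name
# `_dest` and its definition are hypothetical):
#
#     revs = repo.anyrevs([b'draft() and head()'], user=True)
#     revs = repo.anyrevs(
#         [b'_dest and draft()'],
#         user=True,
#         localalias={b'_dest': b'heads(default)'},
#     )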
2027
2027
2028 def url(self):
2028 def url(self):
2029 return b'file:' + self.root
2029 return b'file:' + self.root
2030
2030
2031 def hook(self, name, throw=False, **args):
2031 def hook(self, name, throw=False, **args):
2032 """Call a hook, passing this repo instance.
2032 """Call a hook, passing this repo instance.
2033
2033
2034 This a convenience method to aid invoking hooks. Extensions likely
2034 This a convenience method to aid invoking hooks. Extensions likely
2035 won't call this unless they have registered a custom hook or are
2035 won't call this unless they have registered a custom hook or are
2036 replacing code that is expected to call a hook.
2036 replacing code that is expected to call a hook.
2037 """
2037 """
2038 return hook.hook(self.ui, self, name, throw, **args)
2038 return hook.hook(self.ui, self, name, throw, **args)
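# Illustrative sketch (the hook name `myext-updated` and its argument are
# hypothetical; registered hooks receive the extra arguments as HG_*
# environment variables or, for Python hooks, as keyword arguments):
#
#     repo.hook(b'myext-updated', throw=False, node=hex(newnode))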
2039
2039
2040 @filteredpropertycache
2040 @filteredpropertycache
2041 def _tagscache(self):
2041 def _tagscache(self):
2042 """Returns a tagscache object that contains various tags related
2042 """Returns a tagscache object that contains various tags related
2043 caches."""
2043 caches."""
2044
2044
2045 # This simplifies its cache management by having one decorated
2045 # This simplifies its cache management by having one decorated
2046 # function (this one) and the rest simply fetch things from it.
2046 # function (this one) and the rest simply fetch things from it.
2047 class tagscache(object):
2047 class tagscache(object):
2048 def __init__(self):
2048 def __init__(self):
2049 # These two define the set of tags for this repository. tags
2049 # These two define the set of tags for this repository. tags
2050 # maps tag name to node; tagtypes maps tag name to 'global' or
2050 # maps tag name to node; tagtypes maps tag name to 'global' or
2051 # 'local'. (Global tags are defined by .hgtags across all
2051 # 'local'. (Global tags are defined by .hgtags across all
2052 # heads, and local tags are defined in .hg/localtags.)
2052 # heads, and local tags are defined in .hg/localtags.)
2053 # They constitute the in-memory cache of tags.
2053 # They constitute the in-memory cache of tags.
2054 self.tags = self.tagtypes = None
2054 self.tags = self.tagtypes = None
2055
2055
2056 self.nodetagscache = self.tagslist = None
2056 self.nodetagscache = self.tagslist = None
2057
2057
2058 cache = tagscache()
2058 cache = tagscache()
2059 cache.tags, cache.tagtypes = self._findtags()
2059 cache.tags, cache.tagtypes = self._findtags()
2060
2060
2061 return cache
2061 return cache
2062
2062
2063 def tags(self):
2063 def tags(self):
2064 '''return a mapping of tag to node'''
2064 '''return a mapping of tag to node'''
2065 t = {}
2065 t = {}
2066 if self.changelog.filteredrevs:
2066 if self.changelog.filteredrevs:
2067 tags, tt = self._findtags()
2067 tags, tt = self._findtags()
2068 else:
2068 else:
2069 tags = self._tagscache.tags
2069 tags = self._tagscache.tags
2070 rev = self.changelog.rev
2070 rev = self.changelog.rev
2071 for k, v in pycompat.iteritems(tags):
2071 for k, v in pycompat.iteritems(tags):
2072 try:
2072 try:
2073 # ignore tags to unknown nodes
2073 # ignore tags to unknown nodes
2074 rev(v)
2074 rev(v)
2075 t[k] = v
2075 t[k] = v
2076 except (error.LookupError, ValueError):
2076 except (error.LookupError, ValueError):
2077 pass
2077 pass
2078 return t
2078 return t
2079
2079
2080 def _findtags(self):
2080 def _findtags(self):
2081 """Do the hard work of finding tags. Return a pair of dicts
2081 """Do the hard work of finding tags. Return a pair of dicts
2082 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2082 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2083 maps tag name to a string like \'global\' or \'local\'.
2083 maps tag name to a string like \'global\' or \'local\'.
2084 Subclasses or extensions are free to add their own tags, but
2084 Subclasses or extensions are free to add their own tags, but
2085 should be aware that the returned dicts will be retained for the
2085 should be aware that the returned dicts will be retained for the
2086 duration of the localrepo object."""
2086 duration of the localrepo object."""
2087
2087
2088 # XXX what tagtype should subclasses/extensions use? Currently
2088 # XXX what tagtype should subclasses/extensions use? Currently
2089 # mq and bookmarks add tags, but do not set the tagtype at all.
2089 # mq and bookmarks add tags, but do not set the tagtype at all.
2090 # Should each extension invent its own tag type? Should there
2090 # Should each extension invent its own tag type? Should there
2091 # be one tagtype for all such "virtual" tags? Or is the status
2091 # be one tagtype for all such "virtual" tags? Or is the status
2092 # quo fine?
2092 # quo fine?
2093
2093
2094 # map tag name to (node, hist)
2094 # map tag name to (node, hist)
2095 alltags = tagsmod.findglobaltags(self.ui, self)
2095 alltags = tagsmod.findglobaltags(self.ui, self)
2096 # map tag name to tag type
2096 # map tag name to tag type
2097 tagtypes = {tag: b'global' for tag in alltags}
2097 tagtypes = {tag: b'global' for tag in alltags}
2098
2098
2099 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2099 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2100
2100
2101 # Build the return dicts. Have to re-encode tag names because
2101 # Build the return dicts. Have to re-encode tag names because
2102 # the tags module always uses UTF-8 (in order not to lose info
2102 # the tags module always uses UTF-8 (in order not to lose info
2103 # writing to the cache), but the rest of Mercurial wants them in
2103 # writing to the cache), but the rest of Mercurial wants them in
2104 # local encoding.
2104 # local encoding.
2105 tags = {}
2105 tags = {}
2106 for (name, (node, hist)) in pycompat.iteritems(alltags):
2106 for (name, (node, hist)) in pycompat.iteritems(alltags):
2107 if node != self.nullid:
2107 if node != self.nullid:
2108 tags[encoding.tolocal(name)] = node
2108 tags[encoding.tolocal(name)] = node
2109 tags[b'tip'] = self.changelog.tip()
2109 tags[b'tip'] = self.changelog.tip()
2110 tagtypes = {
2110 tagtypes = {
2111 encoding.tolocal(name): value
2111 encoding.tolocal(name): value
2112 for (name, value) in pycompat.iteritems(tagtypes)
2112 for (name, value) in pycompat.iteritems(tagtypes)
2113 }
2113 }
2114 return (tags, tagtypes)
2114 return (tags, tagtypes)
2115
2115
2116 def tagtype(self, tagname):
2116 def tagtype(self, tagname):
2117 """
2117 """
2118 return the type of the given tag. result can be:
2118 return the type of the given tag. result can be:
2119
2119
2120 'local' : a local tag
2120 'local' : a local tag
2121 'global' : a global tag
2121 'global' : a global tag
2122 None : tag does not exist
2122 None : tag does not exist
2123 """
2123 """
2124
2124
2125 return self._tagscache.tagtypes.get(tagname)
2125 return self._tagscache.tagtypes.get(tagname)
2126
2126
2127 def tagslist(self):
2127 def tagslist(self):
2128 '''return a list of tags ordered by revision'''
2128 '''return a list of tags ordered by revision'''
2129 if not self._tagscache.tagslist:
2129 if not self._tagscache.tagslist:
2130 l = []
2130 l = []
2131 for t, n in pycompat.iteritems(self.tags()):
2131 for t, n in pycompat.iteritems(self.tags()):
2132 l.append((self.changelog.rev(n), t, n))
2132 l.append((self.changelog.rev(n), t, n))
2133 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2133 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2134
2134
2135 return self._tagscache.tagslist
2135 return self._tagscache.tagslist
2136
2136
2137 def nodetags(self, node):
2137 def nodetags(self, node):
2138 '''return the tags associated with a node'''
2138 '''return the tags associated with a node'''
2139 if not self._tagscache.nodetagscache:
2139 if not self._tagscache.nodetagscache:
2140 nodetagscache = {}
2140 nodetagscache = {}
2141 for t, n in pycompat.iteritems(self._tagscache.tags):
2141 for t, n in pycompat.iteritems(self._tagscache.tags):
2142 nodetagscache.setdefault(n, []).append(t)
2142 nodetagscache.setdefault(n, []).append(t)
2143 for tags in pycompat.itervalues(nodetagscache):
2143 for tags in pycompat.itervalues(nodetagscache):
2144 tags.sort()
2144 tags.sort()
2145 self._tagscache.nodetagscache = nodetagscache
2145 self._tagscache.nodetagscache = nodetagscache
2146 return self._tagscache.nodetagscache.get(node, [])
2146 return self._tagscache.nodetagscache.get(node, [])
2147
2147
2148 def nodebookmarks(self, node):
2148 def nodebookmarks(self, node):
2149 """return the list of bookmarks pointing to the specified node"""
2149 """return the list of bookmarks pointing to the specified node"""
2150 return self._bookmarks.names(node)
2150 return self._bookmarks.names(node)
2151
2151
2152 def branchmap(self):
2152 def branchmap(self):
2153 """returns a dictionary {branch: [branchheads]} with branchheads
2153 """returns a dictionary {branch: [branchheads]} with branchheads
2154 ordered by increasing revision number"""
2154 ordered by increasing revision number"""
2155 return self._branchcaches[self]
2155 return self._branchcaches[self]
2156
2156
2157 @unfilteredmethod
2157 @unfilteredmethod
2158 def revbranchcache(self):
2158 def revbranchcache(self):
2159 if not self._revbranchcache:
2159 if not self._revbranchcache:
2160 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2160 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2161 return self._revbranchcache
2161 return self._revbranchcache
2162
2162
2163 def register_changeset(self, rev, changelogrevision):
2163 def register_changeset(self, rev, changelogrevision):
2164 self.revbranchcache().setdata(rev, changelogrevision)
2164 self.revbranchcache().setdata(rev, changelogrevision)
2165
2165
2166 def branchtip(self, branch, ignoremissing=False):
2166 def branchtip(self, branch, ignoremissing=False):
2167 """return the tip node for a given branch
2167 """return the tip node for a given branch
2168
2168
2169 If ignoremissing is True, then this method will not raise an error.
2169 If ignoremissing is True, then this method will not raise an error.
2170 This is helpful for callers that only expect None for a missing branch
2170 This is helpful for callers that only expect None for a missing branch
2171 (e.g. namespace).
2171 (e.g. namespace).
2172
2172
2173 """
2173 """
2174 try:
2174 try:
2175 return self.branchmap().branchtip(branch)
2175 return self.branchmap().branchtip(branch)
2176 except KeyError:
2176 except KeyError:
2177 if not ignoremissing:
2177 if not ignoremissing:
2178 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2178 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2179 else:
2179 else:
2180 pass
2180 pass
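# Illustrative sketch of the behaviour above (the branch names are
# hypothetical):
#
#     node = repo.branchtip(b'default')                    # raises if unknown
#     node = repo.branchtip(b'stale', ignoremissing=True)  # None if unknown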
2181
2181
2182 def lookup(self, key):
2182 def lookup(self, key):
2183 node = scmutil.revsymbol(self, key).node()
2183 node = scmutil.revsymbol(self, key).node()
2184 if node is None:
2184 if node is None:
2185 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2185 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2186 return node
2186 return node
2187
2187
2188 def lookupbranch(self, key):
2188 def lookupbranch(self, key):
2189 if self.branchmap().hasbranch(key):
2189 if self.branchmap().hasbranch(key):
2190 return key
2190 return key
2191
2191
2192 return scmutil.revsymbol(self, key).branch()
2192 return scmutil.revsymbol(self, key).branch()
2193
2193
2194 def known(self, nodes):
2194 def known(self, nodes):
2195 cl = self.changelog
2195 cl = self.changelog
2196 get_rev = cl.index.get_rev
2196 get_rev = cl.index.get_rev
2197 filtered = cl.filteredrevs
2197 filtered = cl.filteredrevs
2198 result = []
2198 result = []
2199 for n in nodes:
2199 for n in nodes:
2200 r = get_rev(n)
2200 r = get_rev(n)
2201 resp = not (r is None or r in filtered)
2201 resp = not (r is None or r in filtered)
2202 result.append(resp)
2202 result.append(resp)
2203 return result
2203 return result
2204
2204
2205 def local(self):
2205 def local(self):
2206 return self
2206 return self
2207
2207
2208 def publishing(self):
2208 def publishing(self):
2209 # it's safe (and desirable) to trust the publish flag unconditionally
2209 # it's safe (and desirable) to trust the publish flag unconditionally
2210 # so that we don't finalize changes shared between users via ssh or nfs
2210 # so that we don't finalize changes shared between users via ssh or nfs
2211 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2211 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2212
2212
2213 def cancopy(self):
2213 def cancopy(self):
2214 # so statichttprepo's override of local() works
2214 # so statichttprepo's override of local() works
2215 if not self.local():
2215 if not self.local():
2216 return False
2216 return False
2217 if not self.publishing():
2217 if not self.publishing():
2218 return True
2218 return True
2219 # if publishing we can't copy if there is filtered content
2219 # if publishing we can't copy if there is filtered content
2220 return not self.filtered(b'visible').changelog.filteredrevs
2220 return not self.filtered(b'visible').changelog.filteredrevs
2221
2221
2222 def shared(self):
2222 def shared(self):
2223 '''the type of shared repository (None if not shared)'''
2223 '''the type of shared repository (None if not shared)'''
2224 if self.sharedpath != self.path:
2224 if self.sharedpath != self.path:
2225 return b'store'
2225 return b'store'
2226 return None
2226 return None
2227
2227
2228 def wjoin(self, f, *insidef):
2228 def wjoin(self, f, *insidef):
2229 return self.vfs.reljoin(self.root, f, *insidef)
2229 return self.vfs.reljoin(self.root, f, *insidef)
2230
2230
2231 def setparents(self, p1, p2=None):
2231 def setparents(self, p1, p2=None):
2232 if p2 is None:
2232 if p2 is None:
2233 p2 = self.nullid
2233 p2 = self.nullid
2234 self[None].setparents(p1, p2)
2234 self[None].setparents(p1, p2)
2235 self._quick_access_changeid_invalidate()
2235 self._quick_access_changeid_invalidate()
2236
2236
2237 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2237 def filectx(self, path, changeid=None, fileid=None, changectx=None):
2238 """changeid must be a changeset revision, if specified.
2238 """changeid must be a changeset revision, if specified.
2239 fileid can be a file revision or node."""
2239 fileid can be a file revision or node."""
2240 return context.filectx(
2240 return context.filectx(
2241 self, path, changeid, fileid, changectx=changectx
2241 self, path, changeid, fileid, changectx=changectx
2242 )
2242 )
2243
2243
2244 def getcwd(self):
2244 def getcwd(self):
2245 return self.dirstate.getcwd()
2245 return self.dirstate.getcwd()
2246
2246
2247 def pathto(self, f, cwd=None):
2247 def pathto(self, f, cwd=None):
2248 return self.dirstate.pathto(f, cwd)
2248 return self.dirstate.pathto(f, cwd)
2249
2249
2250 def _loadfilter(self, filter):
2250 def _loadfilter(self, filter):
2251 if filter not in self._filterpats:
2251 if filter not in self._filterpats:
2252 l = []
2252 l = []
2253 for pat, cmd in self.ui.configitems(filter):
2253 for pat, cmd in self.ui.configitems(filter):
2254 if cmd == b'!':
2254 if cmd == b'!':
2255 continue
2255 continue
2256 mf = matchmod.match(self.root, b'', [pat])
2256 mf = matchmod.match(self.root, b'', [pat])
2257 fn = None
2257 fn = None
2258 params = cmd
2258 params = cmd
2259 for name, filterfn in pycompat.iteritems(self._datafilters):
2259 for name, filterfn in pycompat.iteritems(self._datafilters):
2260 if cmd.startswith(name):
2260 if cmd.startswith(name):
2261 fn = filterfn
2261 fn = filterfn
2262 params = cmd[len(name) :].lstrip()
2262 params = cmd[len(name) :].lstrip()
2263 break
2263 break
2264 if not fn:
2264 if not fn:
2265 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2265 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2266 fn.__name__ = 'commandfilter'
2266 fn.__name__ = 'commandfilter'
2267 # Wrap old filters not supporting keyword arguments
2267 # Wrap old filters not supporting keyword arguments
2268 if not pycompat.getargspec(fn)[2]:
2268 if not pycompat.getargspec(fn)[2]:
2269 oldfn = fn
2269 oldfn = fn
2270 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2270 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2271 fn.__name__ = 'compat-' + oldfn.__name__
2271 fn.__name__ = 'compat-' + oldfn.__name__
2272 l.append((mf, fn, params))
2272 l.append((mf, fn, params))
2273 self._filterpats[filter] = l
2273 self._filterpats[filter] = l
2274 return self._filterpats[filter]
2274 return self._filterpats[filter]
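# The `encode`/`decode` filters loaded above come from the configuration;
# an illustrative hgrc sketch (the gzip rules are just an example, not a
# recommendation):
#
#     [encode]
#     *.gz = pipe: gunzip
#
#     [decode]
#     *.gz = gzip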
2275
2275
2276 def _filter(self, filterpats, filename, data):
2276 def _filter(self, filterpats, filename, data):
2277 for mf, fn, cmd in filterpats:
2277 for mf, fn, cmd in filterpats:
2278 if mf(filename):
2278 if mf(filename):
2279 self.ui.debug(
2279 self.ui.debug(
2280 b"filtering %s through %s\n"
2280 b"filtering %s through %s\n"
2281 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2281 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2282 )
2282 )
2283 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2283 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2284 break
2284 break
2285
2285
2286 return data
2286 return data
2287
2287
2288 @unfilteredpropertycache
2288 @unfilteredpropertycache
2289 def _encodefilterpats(self):
2289 def _encodefilterpats(self):
2290 return self._loadfilter(b'encode')
2290 return self._loadfilter(b'encode')
2291
2291
2292 @unfilteredpropertycache
2292 @unfilteredpropertycache
2293 def _decodefilterpats(self):
2293 def _decodefilterpats(self):
2294 return self._loadfilter(b'decode')
2294 return self._loadfilter(b'decode')
2295
2295
2296 def adddatafilter(self, name, filter):
2296 def adddatafilter(self, name, filter):
2297 self._datafilters[name] = filter
2297 self._datafilters[name] = filter
2298
2298
2299 def wread(self, filename):
2299 def wread(self, filename):
2300 if self.wvfs.islink(filename):
2300 if self.wvfs.islink(filename):
2301 data = self.wvfs.readlink(filename)
2301 data = self.wvfs.readlink(filename)
2302 else:
2302 else:
2303 data = self.wvfs.read(filename)
2303 data = self.wvfs.read(filename)
2304 return self._filter(self._encodefilterpats, filename, data)
2304 return self._filter(self._encodefilterpats, filename, data)
2305
2305
2306 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2306 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
2307 """write ``data`` into ``filename`` in the working directory
2307 """write ``data`` into ``filename`` in the working directory
2308
2308
2309 This returns length of written (maybe decoded) data.
2309 This returns length of written (maybe decoded) data.
2310 """
2310 """
2311 data = self._filter(self._decodefilterpats, filename, data)
2311 data = self._filter(self._decodefilterpats, filename, data)
2312 if b'l' in flags:
2312 if b'l' in flags:
2313 self.wvfs.symlink(data, filename)
2313 self.wvfs.symlink(data, filename)
2314 else:
2314 else:
2315 self.wvfs.write(
2315 self.wvfs.write(
2316 filename, data, backgroundclose=backgroundclose, **kwargs
2316 filename, data, backgroundclose=backgroundclose, **kwargs
2317 )
2317 )
2318 if b'x' in flags:
2318 if b'x' in flags:
2319 self.wvfs.setflags(filename, False, True)
2319 self.wvfs.setflags(filename, False, True)
2320 else:
2320 else:
2321 self.wvfs.setflags(filename, False, False)
2321 self.wvfs.setflags(filename, False, False)
2322 return len(data)
2322 return len(data)
2323
2323
2324 def wwritedata(self, filename, data):
2324 def wwritedata(self, filename, data):
2325 return self._filter(self._decodefilterpats, filename, data)
2325 return self._filter(self._decodefilterpats, filename, data)
2326
2326
2327 def currenttransaction(self):
2328 """return the current transaction or None if none exists"""
2329 if self._transref:
2329 if self._transref:
2330 tr = self._transref()
2330 tr = self._transref()
2331 else:
2331 else:
2332 tr = None
2332 tr = None
2333
2333
2334 if tr and tr.running():
2334 if tr and tr.running():
2335 return tr
2335 return tr
2336 return None
2336 return None
2337
2337
2338 def transaction(self, desc, report=None):
2338 def transaction(self, desc, report=None):
2339 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2339 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2340 b'devel', b'check-locks'
2340 b'devel', b'check-locks'
2341 ):
2341 ):
2342 if self._currentlock(self._lockref) is None:
2342 if self._currentlock(self._lockref) is None:
2343 raise error.ProgrammingError(b'transaction requires locking')
2343 raise error.ProgrammingError(b'transaction requires locking')
2344 tr = self.currenttransaction()
2344 tr = self.currenttransaction()
2345 if tr is not None:
2345 if tr is not None:
2346 return tr.nest(name=desc)
2346 return tr.nest(name=desc)
2347
2347
2348 # abort here if the journal already exists
2348 # abort here if the journal already exists
2349 if self.svfs.exists(b"journal"):
2349 if self.svfs.exists(b"journal"):
2350 raise error.RepoError(
2350 raise error.RepoError(
2351 _(b"abandoned transaction found"),
2351 _(b"abandoned transaction found"),
2352 hint=_(b"run 'hg recover' to clean up transaction"),
2352 hint=_(b"run 'hg recover' to clean up transaction"),
2353 )
2353 )
2354
2354
2355 idbase = b"%.40f#%f" % (random.random(), time.time())
2355 idbase = b"%.40f#%f" % (random.random(), time.time())
2356 ha = hex(hashutil.sha1(idbase).digest())
2356 ha = hex(hashutil.sha1(idbase).digest())
2357 txnid = b'TXN:' + ha
2357 txnid = b'TXN:' + ha
2358 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2358 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2359
2359
2360 self._writejournal(desc)
2360 self._writejournal(desc)
2361 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2361 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2362 if report:
2362 if report:
2363 rp = report
2363 rp = report
2364 else:
2364 else:
2365 rp = self.ui.warn
2365 rp = self.ui.warn
2366 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2366 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2367 # we must avoid cyclic reference between repo and transaction.
2367 # we must avoid cyclic reference between repo and transaction.
2368 reporef = weakref.ref(self)
2368 reporef = weakref.ref(self)
2369 # Code to track tag movement
2370 #
2371 # Since tags are all handled as file content, it is actually quite hard
2372 # to track these movements from a code perspective. So we fall back to
2373 # tracking at the repository level. One could envision tracking changes
2374 # to the '.hgtags' file through changegroup apply, but that fails to
2375 # cope with cases where a transaction exposes new heads without a
2376 # changegroup being involved (eg: phase movement).
2377 #
2378 # For now, we gate the feature behind a flag since it likely comes
2379 # with performance impacts. The current code runs more often than needed
2380 # and does not use caches as much as it could. The current focus is on
2381 # the behavior of the feature so we disable it by default. The flag
2382 # will be removed when we are happy with the performance impact.
2383 #
2384 # Once this feature is no longer experimental, move the following
2385 # documentation to the appropriate help section:
2386 #
2387 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2388 # tags (new, changed or deleted tags). In addition, the details of
2389 # these changes are made available in a file at:
2390 # ``REPOROOT/.hg/changes/tags.changes``.
2391 # Make sure you check for HG_TAG_MOVED before reading that file, as it
2392 # might exist from a previous transaction even if no tags were touched
2393 # in this one. Changes are recorded in a line-based format::
2394 #
2395 # <action> <hex-node> <tag-name>\n
2396 #
2397 # Actions are defined as follows:
2398 # "-R": tag is removed,
2399 # "+A": tag is added,
2400 # "-M": tag is moved (old value),
2401 # "+M": tag is moved (new value),
2402 tracktags = lambda x: None
2402 tracktags = lambda x: None
2403 # experimental config: experimental.hook-track-tags
2403 # experimental config: experimental.hook-track-tags
2404 shouldtracktags = self.ui.configbool(
2404 shouldtracktags = self.ui.configbool(
2405 b'experimental', b'hook-track-tags'
2405 b'experimental', b'hook-track-tags'
2406 )
2406 )
2407 if desc != b'strip' and shouldtracktags:
2407 if desc != b'strip' and shouldtracktags:
2408 oldheads = self.changelog.headrevs()
2408 oldheads = self.changelog.headrevs()
2409
2409
2410 def tracktags(tr2):
2410 def tracktags(tr2):
2411 repo = reporef()
2411 repo = reporef()
2412 assert repo is not None # help pytype
2412 assert repo is not None # help pytype
2413 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2413 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2414 newheads = repo.changelog.headrevs()
2414 newheads = repo.changelog.headrevs()
2415 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2415 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2416 # note: we compare lists here.
2417 # As we only do it once, building a set would not be cheaper
2418 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2418 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2419 if changes:
2419 if changes:
2420 tr2.hookargs[b'tag_moved'] = b'1'
2420 tr2.hookargs[b'tag_moved'] = b'1'
2421 with repo.vfs(
2421 with repo.vfs(
2422 b'changes/tags.changes', b'w', atomictemp=True
2422 b'changes/tags.changes', b'w', atomictemp=True
2423 ) as changesfile:
2423 ) as changesfile:
2424 # note: we do not register the file with the transaction
2425 # because we need it to still exist when the transaction
2426 # is closed (for txnclose hooks)
2427 tagsmod.writediff(changesfile, changes)
2427 tagsmod.writediff(changesfile, changes)
2428
2428
2429 def validate(tr2):
2429 def validate(tr2):
2430 """will run pre-closing hooks"""
2430 """will run pre-closing hooks"""
2431 # XXX the transaction API is a bit lacking here so we take a hacky
2431 # XXX the transaction API is a bit lacking here so we take a hacky
2432 # path for now
2432 # path for now
2433 #
2433 #
2434 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2435 # dict is copied before these run. In addition, we need the data
2436 # available to in-memory hooks too.
2437 #
2437 #
2438 # Moreover, we also need to make sure this runs before txnclose
2438 # Moreover, we also need to make sure this runs before txnclose
2439 # hooks and there is no "pending" mechanism that would execute
2439 # hooks and there is no "pending" mechanism that would execute
2440 # logic only if hooks are about to run.
2440 # logic only if hooks are about to run.
2441 #
2441 #
2442 # Fixing this limitation of the transaction is also needed to track
2442 # Fixing this limitation of the transaction is also needed to track
2443 # other families of changes (bookmarks, phases, obsolescence).
2443 # other families of changes (bookmarks, phases, obsolescence).
2444 #
2444 #
2445 # This will have to be fixed before we remove the experimental
2445 # This will have to be fixed before we remove the experimental
2446 # gating.
2446 # gating.
2447 tracktags(tr2)
2447 tracktags(tr2)
2448 repo = reporef()
2448 repo = reporef()
2449 assert repo is not None # help pytype
2449 assert repo is not None # help pytype
2450
2450
2451 singleheadopt = (b'experimental', b'single-head-per-branch')
2451 singleheadopt = (b'experimental', b'single-head-per-branch')
2452 singlehead = repo.ui.configbool(*singleheadopt)
2452 singlehead = repo.ui.configbool(*singleheadopt)
2453 if singlehead:
2453 if singlehead:
2454 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2454 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2455 accountclosed = singleheadsub.get(
2455 accountclosed = singleheadsub.get(
2456 b"account-closed-heads", False
2456 b"account-closed-heads", False
2457 )
2457 )
2458 if singleheadsub.get(b"public-changes-only", False):
2458 if singleheadsub.get(b"public-changes-only", False):
2459 filtername = b"immutable"
2459 filtername = b"immutable"
2460 else:
2460 else:
2461 filtername = b"visible"
2461 filtername = b"visible"
2462 scmutil.enforcesinglehead(
2462 scmutil.enforcesinglehead(
2463 repo, tr2, desc, accountclosed, filtername
2463 repo, tr2, desc, accountclosed, filtername
2464 )
2464 )
2465 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2465 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2466 for name, (old, new) in sorted(
2466 for name, (old, new) in sorted(
2467 tr.changes[b'bookmarks'].items()
2467 tr.changes[b'bookmarks'].items()
2468 ):
2468 ):
2469 args = tr.hookargs.copy()
2469 args = tr.hookargs.copy()
2470 args.update(bookmarks.preparehookargs(name, old, new))
2470 args.update(bookmarks.preparehookargs(name, old, new))
2471 repo.hook(
2471 repo.hook(
2472 b'pretxnclose-bookmark',
2472 b'pretxnclose-bookmark',
2473 throw=True,
2473 throw=True,
2474 **pycompat.strkwargs(args)
2474 **pycompat.strkwargs(args)
2475 )
2475 )
2476 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2476 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2477 cl = repo.unfiltered().changelog
2477 cl = repo.unfiltered().changelog
2478 for revs, (old, new) in tr.changes[b'phases']:
2478 for revs, (old, new) in tr.changes[b'phases']:
2479 for rev in revs:
2479 for rev in revs:
2480 args = tr.hookargs.copy()
2480 args = tr.hookargs.copy()
2481 node = hex(cl.node(rev))
2481 node = hex(cl.node(rev))
2482 args.update(phases.preparehookargs(node, old, new))
2482 args.update(phases.preparehookargs(node, old, new))
2483 repo.hook(
2483 repo.hook(
2484 b'pretxnclose-phase',
2484 b'pretxnclose-phase',
2485 throw=True,
2485 throw=True,
2486 **pycompat.strkwargs(args)
2486 **pycompat.strkwargs(args)
2487 )
2487 )
2488
2488
2489 repo.hook(
2489 repo.hook(
2490 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2490 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2491 )
2491 )
2492
2492
2493 def releasefn(tr, success):
2493 def releasefn(tr, success):
2494 repo = reporef()
2494 repo = reporef()
2495 if repo is None:
2495 if repo is None:
2496 # If the repo has been GC'd (and this release function is being
2496 # If the repo has been GC'd (and this release function is being
2497 # called from transaction.__del__), there's not much we can do,
2497 # called from transaction.__del__), there's not much we can do,
2498 # so just leave the unfinished transaction there and let the
2498 # so just leave the unfinished transaction there and let the
2499 # user run `hg recover`.
2499 # user run `hg recover`.
2500 return
2500 return
2501 if success:
2501 if success:
2502 # this should be explicitly invoked here, because
2502 # this should be explicitly invoked here, because
2503 # in-memory changes aren't written out at closing
2503 # in-memory changes aren't written out at closing
2504 # transaction, if tr.addfilegenerator (via
2504 # transaction, if tr.addfilegenerator (via
2505 # dirstate.write or so) isn't invoked while
2505 # dirstate.write or so) isn't invoked while
2506 # transaction running
2506 # transaction running
2507 repo.dirstate.write(None)
2507 repo.dirstate.write(None)
2508 else:
2508 else:
2509 # discard all changes (including ones already written
2509 # discard all changes (including ones already written
2510 # out) in this transaction
2510 # out) in this transaction
2511 narrowspec.restorebackup(self, b'journal.narrowspec')
2511 narrowspec.restorebackup(self, b'journal.narrowspec')
2512 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2512 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2513 repo.dirstate.restorebackup(None, b'journal.dirstate')
2513 repo.dirstate.restorebackup(None, b'journal.dirstate')
2514
2514
2515 repo.invalidate(clearfilecache=True)
2515 repo.invalidate(clearfilecache=True)
2516
2516
2517 tr = transaction.transaction(
2517 tr = transaction.transaction(
2518 rp,
2518 rp,
2519 self.svfs,
2519 self.svfs,
2520 vfsmap,
2520 vfsmap,
2521 b"journal",
2521 b"journal",
2522 b"undo",
2522 b"undo",
2523 aftertrans(renames),
2523 aftertrans(renames),
2524 self.store.createmode,
2524 self.store.createmode,
2525 validator=validate,
2525 validator=validate,
2526 releasefn=releasefn,
2526 releasefn=releasefn,
2527 checkambigfiles=_cachedfiles,
2527 checkambigfiles=_cachedfiles,
2528 name=desc,
2528 name=desc,
2529 )
2529 )
2530 tr.changes[b'origrepolen'] = len(self)
2530 tr.changes[b'origrepolen'] = len(self)
2531 tr.changes[b'obsmarkers'] = set()
2531 tr.changes[b'obsmarkers'] = set()
2532 tr.changes[b'phases'] = []
2532 tr.changes[b'phases'] = []
2533 tr.changes[b'bookmarks'] = {}
2533 tr.changes[b'bookmarks'] = {}
2534
2534
2535 tr.hookargs[b'txnid'] = txnid
2535 tr.hookargs[b'txnid'] = txnid
2536 tr.hookargs[b'txnname'] = desc
2536 tr.hookargs[b'txnname'] = desc
2537 tr.hookargs[b'changes'] = tr.changes
2537 tr.hookargs[b'changes'] = tr.changes
2538 # note: writing the fncache only during finalize means that the file is
2539 # outdated when running hooks. As fncache is used for streaming clones,
2540 # this is not expected to break anything that happens during the hooks.
2541 tr.addfinalize(b'flush-fncache', self.store.write)
2541 tr.addfinalize(b'flush-fncache', self.store.write)
2542
2542
2543 def txnclosehook(tr2):
2543 def txnclosehook(tr2):
2544 """To be run if transaction is successful, will schedule a hook run"""
2544 """To be run if transaction is successful, will schedule a hook run"""
2545 # Don't reference tr2 in hook() so we don't hold a reference.
2545 # Don't reference tr2 in hook() so we don't hold a reference.
2546 # This reduces memory consumption when there are multiple
2546 # This reduces memory consumption when there are multiple
2547 # transactions per lock. This can likely go away if issue5045
2547 # transactions per lock. This can likely go away if issue5045
2548 # fixes the function accumulation.
2548 # fixes the function accumulation.
2549 hookargs = tr2.hookargs
2549 hookargs = tr2.hookargs
2550
2550
2551 def hookfunc(unused_success):
2551 def hookfunc(unused_success):
2552 repo = reporef()
2552 repo = reporef()
2553 assert repo is not None # help pytype
2553 assert repo is not None # help pytype
2554
2554
2555 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2555 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2556 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2556 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2557 for name, (old, new) in bmchanges:
2557 for name, (old, new) in bmchanges:
2558 args = tr.hookargs.copy()
2558 args = tr.hookargs.copy()
2559 args.update(bookmarks.preparehookargs(name, old, new))
2559 args.update(bookmarks.preparehookargs(name, old, new))
2560 repo.hook(
2560 repo.hook(
2561 b'txnclose-bookmark',
2561 b'txnclose-bookmark',
2562 throw=False,
2562 throw=False,
2563 **pycompat.strkwargs(args)
2563 **pycompat.strkwargs(args)
2564 )
2564 )
2565
2565
2566 if hook.hashook(repo.ui, b'txnclose-phase'):
2566 if hook.hashook(repo.ui, b'txnclose-phase'):
2567 cl = repo.unfiltered().changelog
2567 cl = repo.unfiltered().changelog
2568 phasemv = sorted(
2568 phasemv = sorted(
2569 tr.changes[b'phases'], key=lambda r: r[0][0]
2569 tr.changes[b'phases'], key=lambda r: r[0][0]
2570 )
2570 )
2571 for revs, (old, new) in phasemv:
2571 for revs, (old, new) in phasemv:
2572 for rev in revs:
2572 for rev in revs:
2573 args = tr.hookargs.copy()
2573 args = tr.hookargs.copy()
2574 node = hex(cl.node(rev))
2574 node = hex(cl.node(rev))
2575 args.update(phases.preparehookargs(node, old, new))
2575 args.update(phases.preparehookargs(node, old, new))
2576 repo.hook(
2576 repo.hook(
2577 b'txnclose-phase',
2577 b'txnclose-phase',
2578 throw=False,
2578 throw=False,
2579 **pycompat.strkwargs(args)
2579 **pycompat.strkwargs(args)
2580 )
2580 )
2581
2581
2582 repo.hook(
2582 repo.hook(
2583 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2583 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2584 )
2584 )
2585
2585
2586 repo = reporef()
2586 repo = reporef()
2587 assert repo is not None # help pytype
2587 assert repo is not None # help pytype
2588 repo._afterlock(hookfunc)
2588 repo._afterlock(hookfunc)
2589
2589
2590 tr.addfinalize(b'txnclose-hook', txnclosehook)
2590 tr.addfinalize(b'txnclose-hook', txnclosehook)
2591 # Include a leading "-" to make it happen before the transaction summary
2591 # Include a leading "-" to make it happen before the transaction summary
2592 # reports registered via scmutil.registersummarycallback() whose names
2592 # reports registered via scmutil.registersummarycallback() whose names
2593 # are 00-txnreport etc. That way, the caches will be warm when the
2593 # are 00-txnreport etc. That way, the caches will be warm when the
2594 # callbacks run.
2594 # callbacks run.
2595 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2595 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2596
2596
2597 def txnaborthook(tr2):
2597 def txnaborthook(tr2):
2598 """To be run if transaction is aborted"""
2598 """To be run if transaction is aborted"""
2599 repo = reporef()
2599 repo = reporef()
2600 assert repo is not None # help pytype
2600 assert repo is not None # help pytype
2601 repo.hook(
2601 repo.hook(
2602 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2602 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2603 )
2603 )
2604
2604
2605 tr.addabort(b'txnabort-hook', txnaborthook)
2605 tr.addabort(b'txnabort-hook', txnaborthook)
2606 # avoid eager cache invalidation. in-memory data should be identical
2606 # avoid eager cache invalidation. in-memory data should be identical
2607 # to stored data if transaction has no error.
2607 # to stored data if transaction has no error.
2608 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2608 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2609 self._transref = weakref.ref(tr)
2609 self._transref = weakref.ref(tr)
2610 scmutil.registersummarycallback(self, tr, desc)
2610 scmutil.registersummarycallback(self, tr, desc)
2611 return tr
2611 return tr
2612
2612
2613 def _journalfiles(self):
2613 def _journalfiles(self):
2614 return (
2614 return (
2615 (self.svfs, b'journal'),
2615 (self.svfs, b'journal'),
2616 (self.svfs, b'journal.narrowspec'),
2616 (self.svfs, b'journal.narrowspec'),
2617 (self.vfs, b'journal.narrowspec.dirstate'),
2617 (self.vfs, b'journal.narrowspec.dirstate'),
2618 (self.vfs, b'journal.dirstate'),
2618 (self.vfs, b'journal.dirstate'),
2619 (self.vfs, b'journal.branch'),
2619 (self.vfs, b'journal.branch'),
2620 (self.vfs, b'journal.desc'),
2620 (self.vfs, b'journal.desc'),
2621 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2621 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2622 (self.svfs, b'journal.phaseroots'),
2622 (self.svfs, b'journal.phaseroots'),
2623 )
2623 )
2624
2624
2625 def undofiles(self):
2625 def undofiles(self):
2626 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2626 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2627
2627
2628 @unfilteredmethod
2628 @unfilteredmethod
2629 def _writejournal(self, desc):
2629 def _writejournal(self, desc):
2630 self.dirstate.savebackup(None, b'journal.dirstate')
2630 self.dirstate.savebackup(None, b'journal.dirstate')
2631 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2631 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2632 narrowspec.savebackup(self, b'journal.narrowspec')
2632 narrowspec.savebackup(self, b'journal.narrowspec')
2633 self.vfs.write(
2633 self.vfs.write(
2634 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2634 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2635 )
2635 )
2636 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2636 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2637 bookmarksvfs = bookmarks.bookmarksvfs(self)
2637 bookmarksvfs = bookmarks.bookmarksvfs(self)
2638 bookmarksvfs.write(
2638 bookmarksvfs.write(
2639 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2639 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2640 )
2640 )
2641 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2641 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2642
2642
2643 def recover(self):
2643 def recover(self):
2644 with self.lock():
2644 with self.lock():
2645 if self.svfs.exists(b"journal"):
2645 if self.svfs.exists(b"journal"):
2646 self.ui.status(_(b"rolling back interrupted transaction\n"))
2646 self.ui.status(_(b"rolling back interrupted transaction\n"))
2647 vfsmap = {
2647 vfsmap = {
2648 b'': self.svfs,
2648 b'': self.svfs,
2649 b'plain': self.vfs,
2649 b'plain': self.vfs,
2650 }
2650 }
2651 transaction.rollback(
2651 transaction.rollback(
2652 self.svfs,
2652 self.svfs,
2653 vfsmap,
2653 vfsmap,
2654 b"journal",
2654 b"journal",
2655 self.ui.warn,
2655 self.ui.warn,
2656 checkambigfiles=_cachedfiles,
2656 checkambigfiles=_cachedfiles,
2657 )
2657 )
2658 self.invalidate()
2658 self.invalidate()
2659 return True
2659 return True
2660 else:
2660 else:
2661 self.ui.warn(_(b"no interrupted transaction available\n"))
2661 self.ui.warn(_(b"no interrupted transaction available\n"))
2662 return False
2662 return False
2663
2663
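A small usage sketch, assuming `repo` is an open localrepository instance (not part of this module): `recover()` reports whether an interrupted transaction was found and rolled back.

if repo.recover():
    repo.ui.status(b'interrupted transaction rolled back\n')
else:
    repo.ui.status(b'nothing to recover\n')
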
2664 def rollback(self, dryrun=False, force=False):
2664 def rollback(self, dryrun=False, force=False):
2665 wlock = lock = dsguard = None
2665 wlock = lock = dsguard = None
2666 try:
2666 try:
2667 wlock = self.wlock()
2667 wlock = self.wlock()
2668 lock = self.lock()
2668 lock = self.lock()
2669 if self.svfs.exists(b"undo"):
2669 if self.svfs.exists(b"undo"):
2670 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2670 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2671
2671
2672 return self._rollback(dryrun, force, dsguard)
2672 return self._rollback(dryrun, force, dsguard)
2673 else:
2673 else:
2674 self.ui.warn(_(b"no rollback information available\n"))
2674 self.ui.warn(_(b"no rollback information available\n"))
2675 return 1
2675 return 1
2676 finally:
2676 finally:
2677 release(dsguard, lock, wlock)
2677 release(dsguard, lock, wlock)
2678
2678
2679 @unfilteredmethod # Until we get smarter cache management
2679 @unfilteredmethod # Until we get smarter cache management
2680 def _rollback(self, dryrun, force, dsguard):
2680 def _rollback(self, dryrun, force, dsguard):
2681 ui = self.ui
2681 ui = self.ui
2682 try:
2682 try:
2683 args = self.vfs.read(b'undo.desc').splitlines()
2683 args = self.vfs.read(b'undo.desc').splitlines()
2684 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2684 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2685 if len(args) >= 3:
2685 if len(args) >= 3:
2686 detail = args[2]
2686 detail = args[2]
2687 oldtip = oldlen - 1
2687 oldtip = oldlen - 1
2688
2688
2689 if detail and ui.verbose:
2689 if detail and ui.verbose:
2690 msg = _(
2690 msg = _(
2691 b'repository tip rolled back to revision %d'
2691 b'repository tip rolled back to revision %d'
2692 b' (undo %s: %s)\n'
2692 b' (undo %s: %s)\n'
2693 ) % (oldtip, desc, detail)
2693 ) % (oldtip, desc, detail)
2694 else:
2694 else:
2695 msg = _(
2695 msg = _(
2696 b'repository tip rolled back to revision %d (undo %s)\n'
2696 b'repository tip rolled back to revision %d (undo %s)\n'
2697 ) % (oldtip, desc)
2697 ) % (oldtip, desc)
2698 except IOError:
2698 except IOError:
2699 msg = _(b'rolling back unknown transaction\n')
2699 msg = _(b'rolling back unknown transaction\n')
2700 desc = None
2700 desc = None
2701
2701
2702 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2702 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2703 raise error.Abort(
2703 raise error.Abort(
2704 _(
2704 _(
2705 b'rollback of last commit while not checked out '
2705 b'rollback of last commit while not checked out '
2706 b'may lose data'
2706 b'may lose data'
2707 ),
2707 ),
2708 hint=_(b'use -f to force'),
2708 hint=_(b'use -f to force'),
2709 )
2709 )
2710
2710
2711 ui.status(msg)
2711 ui.status(msg)
2712 if dryrun:
2712 if dryrun:
2713 return 0
2713 return 0
2714
2714
2715 parents = self.dirstate.parents()
2715 parents = self.dirstate.parents()
2716 self.destroying()
2716 self.destroying()
2717 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2717 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2718 transaction.rollback(
2718 transaction.rollback(
2719 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2719 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2720 )
2720 )
2721 bookmarksvfs = bookmarks.bookmarksvfs(self)
2721 bookmarksvfs = bookmarks.bookmarksvfs(self)
2722 if bookmarksvfs.exists(b'undo.bookmarks'):
2722 if bookmarksvfs.exists(b'undo.bookmarks'):
2723 bookmarksvfs.rename(
2723 bookmarksvfs.rename(
2724 b'undo.bookmarks', b'bookmarks', checkambig=True
2724 b'undo.bookmarks', b'bookmarks', checkambig=True
2725 )
2725 )
2726 if self.svfs.exists(b'undo.phaseroots'):
2726 if self.svfs.exists(b'undo.phaseroots'):
2727 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2727 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2728 self.invalidate()
2728 self.invalidate()
2729
2729
2730 has_node = self.changelog.index.has_node
2730 has_node = self.changelog.index.has_node
2731 parentgone = any(not has_node(p) for p in parents)
2731 parentgone = any(not has_node(p) for p in parents)
2732 if parentgone:
2732 if parentgone:
2733 # prevent dirstateguard from overwriting already restored one
2733 # prevent dirstateguard from overwriting already restored one
2734 dsguard.close()
2734 dsguard.close()
2735
2735
2736 narrowspec.restorebackup(self, b'undo.narrowspec')
2736 narrowspec.restorebackup(self, b'undo.narrowspec')
2737 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2737 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2738 self.dirstate.restorebackup(None, b'undo.dirstate')
2738 self.dirstate.restorebackup(None, b'undo.dirstate')
2739 try:
2739 try:
2740 branch = self.vfs.read(b'undo.branch')
2740 branch = self.vfs.read(b'undo.branch')
2741 self.dirstate.setbranch(encoding.tolocal(branch))
2741 self.dirstate.setbranch(encoding.tolocal(branch))
2742 except IOError:
2742 except IOError:
2743 ui.warn(
2743 ui.warn(
2744 _(
2744 _(
2745 b'named branch could not be reset: '
2745 b'named branch could not be reset: '
2746 b'current branch is still \'%s\'\n'
2746 b'current branch is still \'%s\'\n'
2747 )
2747 )
2748 % self.dirstate.branch()
2748 % self.dirstate.branch()
2749 )
2749 )
2750
2750
2751 parents = tuple([p.rev() for p in self[None].parents()])
2751 parents = tuple([p.rev() for p in self[None].parents()])
2752 if len(parents) > 1:
2752 if len(parents) > 1:
2753 ui.status(
2753 ui.status(
2754 _(
2754 _(
2755 b'working directory now based on '
2755 b'working directory now based on '
2756 b'revisions %d and %d\n'
2756 b'revisions %d and %d\n'
2757 )
2757 )
2758 % parents
2758 % parents
2759 )
2759 )
2760 else:
2760 else:
2761 ui.status(
2761 ui.status(
2762 _(b'working directory now based on revision %d\n') % parents
2762 _(b'working directory now based on revision %d\n') % parents
2763 )
2763 )
2764 mergestatemod.mergestate.clean(self)
2764 mergestatemod.mergestate.clean(self)
2765
2765
2766 # TODO: if we know which new heads may result from this rollback, pass
2766 # TODO: if we know which new heads may result from this rollback, pass
2767 # them to destroy(), which will prevent the branchhead cache from being
2767 # them to destroy(), which will prevent the branchhead cache from being
2768 # invalidated.
2768 # invalidated.
2769 self.destroyed()
2769 self.destroyed()
2770 return 0
2770 return 0
2771
2771
2772 def _buildcacheupdater(self, newtransaction):
2772 def _buildcacheupdater(self, newtransaction):
2773 """called during transaction to build the callback updating cache
2773 """called during transaction to build the callback updating cache
2774
2774
2775 Lives on the repository to help extensions that might want to augment
2775 Lives on the repository to help extensions that might want to augment
2776 this logic. For this purpose, the created transaction is passed to the
2776 this logic. For this purpose, the created transaction is passed to the
2777 method.
2777 method.
2778 """
2778 """
2779 # we must avoid cyclic reference between repo and transaction.
2779 # we must avoid cyclic reference between repo and transaction.
2780 reporef = weakref.ref(self)
2780 reporef = weakref.ref(self)
2781
2781
2782 def updater(tr):
2782 def updater(tr):
2783 repo = reporef()
2783 repo = reporef()
2784 assert repo is not None # help pytype
2784 assert repo is not None # help pytype
2785 repo.updatecaches(tr)
2785 repo.updatecaches(tr)
2786
2786
2787 return updater
2787 return updater
2788
2788
2789 @unfilteredmethod
2789 @unfilteredmethod
2790 def updatecaches(self, tr=None, full=False, caches=None):
2790 def updatecaches(self, tr=None, full=False, caches=None):
2791 """warm appropriate caches
2791 """warm appropriate caches
2792
2792
2793 If this function is called after a transaction closed, the transaction
2793 If this function is called after a transaction closed, the transaction
2794 will be available in the 'tr' argument. This can be used to selectively
2794 will be available in the 'tr' argument. This can be used to selectively
2795 update caches relevant to the changes in that transaction.
2795 update caches relevant to the changes in that transaction.
2796
2796
2797 If 'full' is set, make sure all caches the function knows about have
2797 If 'full' is set, make sure all caches the function knows about have
2798 up-to-date data, even the ones usually loaded more lazily.
2798 up-to-date data, even the ones usually loaded more lazily.
2799
2799
2800 The `full` argument can take a special "post-clone" value. In this case
2800 The `full` argument can take a special "post-clone" value. In this case
2801 the cache warming is done after a clone, and some of the slower caches might
2801 the cache warming is done after a clone, and some of the slower caches might
2802 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2802 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2803 as we plan for a cleaner way to deal with this for 5.9.
2803 as we plan for a cleaner way to deal with this for 5.9.
2804 """
2804 """
2805 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2805 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2806 # During strip, many caches are invalid but
2806 # During strip, many caches are invalid but
2807 # later call to `destroyed` will refresh them.
2807 # later call to `destroyed` will refresh them.
2808 return
2808 return
2809
2809
2810 unfi = self.unfiltered()
2810 unfi = self.unfiltered()
2811
2811
2812 if full:
2812 if full:
2813 msg = (
2813 msg = (
2814 "`full` argument for `repo.updatecaches` is deprecated\n"
2814 "`full` argument for `repo.updatecaches` is deprecated\n"
2815 "(use `caches=repository.CACHE_ALL` instead)"
2815 "(use `caches=repository.CACHE_ALL` instead)"
2816 )
2816 )
2817 self.ui.deprecwarn(msg, b"5.9")
2817 self.ui.deprecwarn(msg, b"5.9")
2818 caches = repository.CACHES_ALL
2818 caches = repository.CACHES_ALL
2819 if full == b"post-clone":
2819 if full == b"post-clone":
2820 caches = repository.CACHES_POST_CLONE
2820 caches = repository.CACHES_POST_CLONE
2821 caches = repository.CACHES_ALL
2821 caches = repository.CACHES_ALL
2822 elif caches is None:
2822 elif caches is None:
2823 caches = repository.CACHES_DEFAULT
2823 caches = repository.CACHES_DEFAULT
2824
2824
2825 if repository.CACHE_BRANCHMAP_SERVED in caches:
2825 if repository.CACHE_BRANCHMAP_SERVED in caches:
2826 if tr is None or tr.changes[b'origrepolen'] < len(self):
2826 if tr is None or tr.changes[b'origrepolen'] < len(self):
2827 # accessing the 'served' branchmap should refresh all the others,
2827 # accessing the 'served' branchmap should refresh all the others,
2828 self.ui.debug(b'updating the branch cache\n')
2828 self.ui.debug(b'updating the branch cache\n')
2829 self.filtered(b'served').branchmap()
2829 self.filtered(b'served').branchmap()
2830 self.filtered(b'served.hidden').branchmap()
2830 self.filtered(b'served.hidden').branchmap()
2831 # flush all possibly delayed write.
2831 # flush all possibly delayed write.
2832 self._branchcaches.write_delayed(self)
2832 self._branchcaches.write_delayed(self)
2833
2833
2834 if repository.CACHE_CHANGELOG_CACHE in caches:
2834 if repository.CACHE_CHANGELOG_CACHE in caches:
2835 self.changelog.update_caches(transaction=tr)
2835 self.changelog.update_caches(transaction=tr)
2836
2836
2837 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2837 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2838 self.manifestlog.update_caches(transaction=tr)
2838 self.manifestlog.update_caches(transaction=tr)
2839
2839
2840 if repository.CACHE_REV_BRANCH in caches:
2840 if repository.CACHE_REV_BRANCH in caches:
2841 rbc = unfi.revbranchcache()
2841 rbc = unfi.revbranchcache()
2842 for r in unfi.changelog:
2842 for r in unfi.changelog:
2843 rbc.branchinfo(r)
2843 rbc.branchinfo(r)
2844 rbc.write()
2844 rbc.write()
2845
2845
2846 if repository.CACHE_FULL_MANIFEST in caches:
2846 if repository.CACHE_FULL_MANIFEST in caches:
2847 # ensure the working copy parents are in the manifestfulltextcache
2847 # ensure the working copy parents are in the manifestfulltextcache
2848 for ctx in self[b'.'].parents():
2848 for ctx in self[b'.'].parents():
2849 ctx.manifest() # accessing the manifest is enough
2849 ctx.manifest() # accessing the manifest is enough
2850
2850
2851 if repository.CACHE_FILE_NODE_TAGS in caches:
2851 if repository.CACHE_FILE_NODE_TAGS in caches:
2852 # accessing fnode cache warms the cache
2852 # accessing fnode cache warms the cache
2853 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2853 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2854
2854
2855 if repository.CACHE_TAGS_DEFAULT in caches:
2855 if repository.CACHE_TAGS_DEFAULT in caches:
2856 # accessing tags warms the cache
2856 # accessing tags warms the cache
2857 self.tags()
2857 self.tags()
2858 if repository.CACHE_TAGS_SERVED in caches:
2858 if repository.CACHE_TAGS_SERVED in caches:
2859 self.filtered(b'served').tags()
2859 self.filtered(b'served').tags()
2860
2860
2861 if repository.CACHE_BRANCHMAP_ALL in caches:
2861 if repository.CACHE_BRANCHMAP_ALL in caches:
2862 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2862 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2863 # so we're forcing a write to cause these caches to be warmed up
2863 # so we're forcing a write to cause these caches to be warmed up
2864 # even if they haven't explicitly been requested yet (if they've
2864 # even if they haven't explicitly been requested yet (if they've
2865 # never been used by hg, they won't ever have been written, even if
2865 # never been used by hg, they won't ever have been written, even if
2866 # they're a subset of another kind of cache that *has* been used).
2866 # they're a subset of another kind of cache that *has* been used).
2867 for filt in repoview.filtertable.keys():
2867 for filt in repoview.filtertable.keys():
2868 filtered = self.filtered(filt)
2868 filtered = self.filtered(filt)
2869 filtered.branchmap().write(filtered)
2869 filtered.branchmap().write(filtered)
2870
2870
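A sketch of how a caller might drive the `caches` argument explicitly; the constants are the ones referenced above from the repository interface, and `repo` is an assumed existing repository object.

from mercurial.interfaces import repository

# warm only the default cache set, as a normal transaction close would
repo.updatecaches(caches=repository.CACHES_DEFAULT)

# warm everything, including lazily-loaded caches, e.g. after a large import
repo.updatecaches(caches=repository.CACHES_ALL)
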
2871 def invalidatecaches(self):
2871 def invalidatecaches(self):
2872
2872
2873 if '_tagscache' in vars(self):
2873 if '_tagscache' in vars(self):
2874 # can't use delattr on proxy
2874 # can't use delattr on proxy
2875 del self.__dict__['_tagscache']
2875 del self.__dict__['_tagscache']
2876
2876
2877 self._branchcaches.clear()
2877 self._branchcaches.clear()
2878 self.invalidatevolatilesets()
2878 self.invalidatevolatilesets()
2879 self._sparsesignaturecache.clear()
2879 self._sparsesignaturecache.clear()
2880
2880
2881 def invalidatevolatilesets(self):
2881 def invalidatevolatilesets(self):
2882 self.filteredrevcache.clear()
2882 self.filteredrevcache.clear()
2883 obsolete.clearobscaches(self)
2883 obsolete.clearobscaches(self)
2884 self._quick_access_changeid_invalidate()
2884 self._quick_access_changeid_invalidate()
2885
2885
2886 def invalidatedirstate(self):
2886 def invalidatedirstate(self):
2887 """Invalidates the dirstate, causing the next call to dirstate
2887 """Invalidates the dirstate, causing the next call to dirstate
2888 to check if it was modified since the last time it was read,
2888 to check if it was modified since the last time it was read,
2889 rereading it if it has.
2889 rereading it if it has.
2890
2890
2891 This is different from dirstate.invalidate() in that it doesn't always
2891 This is different from dirstate.invalidate() in that it doesn't always
2892 reread the dirstate. Use dirstate.invalidate() if you want to
2892 reread the dirstate. Use dirstate.invalidate() if you want to
2893 explicitly read the dirstate again (i.e. restoring it to a previous
2893 explicitly read the dirstate again (i.e. restoring it to a previous
2894 known good state)."""
2894 known good state)."""
2895 if hasunfilteredcache(self, 'dirstate'):
2895 if hasunfilteredcache(self, 'dirstate'):
2896 for k in self.dirstate._filecache:
2896 for k in self.dirstate._filecache:
2897 try:
2897 try:
2898 delattr(self.dirstate, k)
2898 delattr(self.dirstate, k)
2899 except AttributeError:
2899 except AttributeError:
2900 pass
2900 pass
2901 delattr(self.unfiltered(), 'dirstate')
2901 delattr(self.unfiltered(), 'dirstate')
2902
2902
2903 def invalidate(self, clearfilecache=False):
2903 def invalidate(self, clearfilecache=False):
2904 """Invalidates both store and non-store parts other than dirstate
2904 """Invalidates both store and non-store parts other than dirstate
2905
2905
2906 If a transaction is running, invalidation of store is omitted,
2906 If a transaction is running, invalidation of store is omitted,
2907 because discarding in-memory changes might cause inconsistency
2907 because discarding in-memory changes might cause inconsistency
2908 (e.g. incomplete fncache causes unintentional failure, but
2908 (e.g. incomplete fncache causes unintentional failure, but
2909 redundant one doesn't).
2909 redundant one doesn't).
2910 """
2910 """
2911 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2911 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2912 for k in list(self._filecache.keys()):
2912 for k in list(self._filecache.keys()):
2913 # dirstate is invalidated separately in invalidatedirstate()
2913 # dirstate is invalidated separately in invalidatedirstate()
2914 if k == b'dirstate':
2914 if k == b'dirstate':
2915 continue
2915 continue
2916 if (
2916 if (
2917 k == b'changelog'
2917 k == b'changelog'
2918 and self.currenttransaction()
2918 and self.currenttransaction()
2919 and self.changelog._delayed
2919 and self.changelog._delayed
2920 ):
2920 ):
2921 # The changelog object may store unwritten revisions. We don't
2921 # The changelog object may store unwritten revisions. We don't
2922 # want to lose them.
2922 # want to lose them.
2923 # TODO: Solve the problem instead of working around it.
2923 # TODO: Solve the problem instead of working around it.
2924 continue
2924 continue
2925
2925
2926 if clearfilecache:
2926 if clearfilecache:
2927 del self._filecache[k]
2927 del self._filecache[k]
2928 try:
2928 try:
2929 delattr(unfiltered, k)
2929 delattr(unfiltered, k)
2930 except AttributeError:
2930 except AttributeError:
2931 pass
2931 pass
2932 self.invalidatecaches()
2932 self.invalidatecaches()
2933 if not self.currenttransaction():
2933 if not self.currenttransaction():
2934 # TODO: Changing contents of store outside transaction
2934 # TODO: Changing contents of store outside transaction
2935 # causes inconsistency. We should make in-memory store
2935 # causes inconsistency. We should make in-memory store
2936 # changes detectable, and abort if changed.
2936 # changes detectable, and abort if changed.
2937 self.store.invalidatecaches()
2937 self.store.invalidatecaches()
2938
2938
2939 def invalidateall(self):
2939 def invalidateall(self):
2940 """Fully invalidates both store and non-store parts, causing the
2940 """Fully invalidates both store and non-store parts, causing the
2941 subsequent operation to reread any outside changes."""
2941 subsequent operation to reread any outside changes."""
2942 # extensions should hook this to invalidate their caches
2942 # extensions should hook this to invalidate their caches
2943 self.invalidate()
2943 self.invalidate()
2944 self.invalidatedirstate()
2944 self.invalidatedirstate()
2945
2945
2946 @unfilteredmethod
2946 @unfilteredmethod
2947 def _refreshfilecachestats(self, tr):
2947 def _refreshfilecachestats(self, tr):
2948 """Reload stats of cached files so that they are flagged as valid"""
2948 """Reload stats of cached files so that they are flagged as valid"""
2949 for k, ce in self._filecache.items():
2949 for k, ce in self._filecache.items():
2950 k = pycompat.sysstr(k)
2950 k = pycompat.sysstr(k)
2951 if k == 'dirstate' or k not in self.__dict__:
2951 if k == 'dirstate' or k not in self.__dict__:
2952 continue
2952 continue
2953 ce.refresh()
2953 ce.refresh()
2954
2954
2955 def _lock(
2955 def _lock(
2956 self,
2956 self,
2957 vfs,
2957 vfs,
2958 lockname,
2958 lockname,
2959 wait,
2959 wait,
2960 releasefn,
2960 releasefn,
2961 acquirefn,
2961 acquirefn,
2962 desc,
2962 desc,
2963 ):
2963 ):
2964 timeout = 0
2964 timeout = 0
2965 warntimeout = 0
2965 warntimeout = 0
2966 if wait:
2966 if wait:
2967 timeout = self.ui.configint(b"ui", b"timeout")
2967 timeout = self.ui.configint(b"ui", b"timeout")
2968 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2968 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2969 # internal config: ui.signal-safe-lock
2969 # internal config: ui.signal-safe-lock
2970 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2970 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2971
2971
2972 l = lockmod.trylock(
2972 l = lockmod.trylock(
2973 self.ui,
2973 self.ui,
2974 vfs,
2974 vfs,
2975 lockname,
2975 lockname,
2976 timeout,
2976 timeout,
2977 warntimeout,
2977 warntimeout,
2978 releasefn=releasefn,
2978 releasefn=releasefn,
2979 acquirefn=acquirefn,
2979 acquirefn=acquirefn,
2980 desc=desc,
2980 desc=desc,
2981 signalsafe=signalsafe,
2981 signalsafe=signalsafe,
2982 )
2982 )
2983 return l
2983 return l
2984
2984
2985 def _afterlock(self, callback):
2985 def _afterlock(self, callback):
2986 """add a callback to be run when the repository is fully unlocked
2986 """add a callback to be run when the repository is fully unlocked
2987
2987
2988 The callback will be executed when the outermost lock is released
2988 The callback will be executed when the outermost lock is released
2989 (with wlock being higher level than 'lock')."""
2989 (with wlock being higher level than 'lock')."""
2990 for ref in (self._wlockref, self._lockref):
2990 for ref in (self._wlockref, self._lockref):
2991 l = ref and ref()
2991 l = ref and ref()
2992 if l and l.held:
2992 if l and l.held:
2993 l.postrelease.append(callback)
2993 l.postrelease.append(callback)
2994 break
2994 break
2995 else: # no lock has been found.
2995 else: # no lock has been found.
2996 callback(True)
2996 callback(True)
2997
2997
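A minimal sketch of the contract described in the `_afterlock` docstring above; `repo` and the callback are illustrative assumptions, not part of this module. The callback runs once the outermost lock is released, or immediately when no lock is held.

def _notify(success):
    # 'success' is True when the enclosing lock was released cleanly
    if success:
        repo.ui.note(b'post-lock callback ran\n')

with repo.lock():
    repo._afterlock(_notify)  # queued on the currently held lock
    # ... store mutations happen here ...
# _notify(True) fires here, after the lock is fully released
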
2998 def lock(self, wait=True):
2998 def lock(self, wait=True):
2999 """Lock the repository store (.hg/store) and return a weak reference
2999 """Lock the repository store (.hg/store) and return a weak reference
3000 to the lock. Use this before modifying the store (e.g. committing or
3000 to the lock. Use this before modifying the store (e.g. committing or
3001 stripping). If you are opening a transaction, get a lock as well.
3001 stripping). If you are opening a transaction, get a lock as well.
3002
3002
3003 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3003 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3004 'wlock' first to avoid a dead-lock hazard."""
3004 'wlock' first to avoid a dead-lock hazard."""
3005 l = self._currentlock(self._lockref)
3005 l = self._currentlock(self._lockref)
3006 if l is not None:
3006 if l is not None:
3007 l.lock()
3007 l.lock()
3008 return l
3008 return l
3009
3009
3010 l = self._lock(
3010 l = self._lock(
3011 vfs=self.svfs,
3011 vfs=self.svfs,
3012 lockname=b"lock",
3012 lockname=b"lock",
3013 wait=wait,
3013 wait=wait,
3014 releasefn=None,
3014 releasefn=None,
3015 acquirefn=self.invalidate,
3015 acquirefn=self.invalidate,
3016 desc=_(b'repository %s') % self.origroot,
3016 desc=_(b'repository %s') % self.origroot,
3017 )
3017 )
3018 self._lockref = weakref.ref(l)
3018 self._lockref = weakref.ref(l)
3019 return l
3019 return l
3020
3020
3021 def wlock(self, wait=True):
3021 def wlock(self, wait=True):
3022 """Lock the non-store parts of the repository (everything under
3022 """Lock the non-store parts of the repository (everything under
3023 .hg except .hg/store) and return a weak reference to the lock.
3023 .hg except .hg/store) and return a weak reference to the lock.
3024
3024
3025 Use this before modifying files in .hg.
3025 Use this before modifying files in .hg.
3026
3026
3027 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3027 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3028 'wlock' first to avoid a dead-lock hazard."""
3028 'wlock' first to avoid a dead-lock hazard."""
3029 l = self._wlockref() if self._wlockref else None
3029 l = self._wlockref() if self._wlockref else None
3030 if l is not None and l.held:
3030 if l is not None and l.held:
3031 l.lock()
3031 l.lock()
3032 return l
3032 return l
3033
3033
3034 # We do not need to check for non-waiting lock acquisition. Such
3034 # We do not need to check for non-waiting lock acquisition. Such
3035 # acquisition would not cause a dead-lock, as it would just fail.
3035 # acquisition would not cause a dead-lock, as it would just fail.
3036 if wait and (
3036 if wait and (
3037 self.ui.configbool(b'devel', b'all-warnings')
3037 self.ui.configbool(b'devel', b'all-warnings')
3038 or self.ui.configbool(b'devel', b'check-locks')
3038 or self.ui.configbool(b'devel', b'check-locks')
3039 ):
3039 ):
3040 if self._currentlock(self._lockref) is not None:
3040 if self._currentlock(self._lockref) is not None:
3041 self.ui.develwarn(b'"wlock" acquired after "lock"')
3041 self.ui.develwarn(b'"wlock" acquired after "lock"')
3042
3042
3043 def unlock():
3043 def unlock():
3044 if self.dirstate.pendingparentchange():
3044 if self.dirstate.pendingparentchange():
3045 self.dirstate.invalidate()
3045 self.dirstate.invalidate()
3046 else:
3046 else:
3047 self.dirstate.write(None)
3047 self.dirstate.write(None)
3048
3048
3049 self._filecache[b'dirstate'].refresh()
3049 self._filecache[b'dirstate'].refresh()
3050
3050
3051 l = self._lock(
3051 l = self._lock(
3052 self.vfs,
3052 self.vfs,
3053 b"wlock",
3053 b"wlock",
3054 wait,
3054 wait,
3055 unlock,
3055 unlock,
3056 self.invalidatedirstate,
3056 self.invalidatedirstate,
3057 _(b'working directory of %s') % self.origroot,
3057 _(b'working directory of %s') % self.origroot,
3058 )
3058 )
3059 self._wlockref = weakref.ref(l)
3059 self._wlockref = weakref.ref(l)
3060 return l
3060 return l
3061
3061
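As both docstrings above insist, 'wlock' must be acquired before 'lock'; a short sketch of the usual pattern, assuming `repo` is an existing localrepository.

with repo.wlock(), repo.lock():
    # safe to touch both .hg/* and .hg/store/* here; acquiring the locks
    # in the opposite order would trip the devel 'check-locks' warning above
    pass
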
3062 def _currentlock(self, lockref):
3062 def _currentlock(self, lockref):
3063 """Returns the lock if it's held, or None if it's not."""
3063 """Returns the lock if it's held, or None if it's not."""
3064 if lockref is None:
3064 if lockref is None:
3065 return None
3065 return None
3066 l = lockref()
3066 l = lockref()
3067 if l is None or not l.held:
3067 if l is None or not l.held:
3068 return None
3068 return None
3069 return l
3069 return l
3070
3070
3071 def currentwlock(self):
3071 def currentwlock(self):
3072 """Returns the wlock if it's held, or None if it's not."""
3072 """Returns the wlock if it's held, or None if it's not."""
3073 return self._currentlock(self._wlockref)
3073 return self._currentlock(self._wlockref)
3074
3074
3075 def checkcommitpatterns(self, wctx, match, status, fail):
3075 def checkcommitpatterns(self, wctx, match, status, fail):
3076 """check for commit arguments that aren't committable"""
3076 """check for commit arguments that aren't committable"""
3077 if match.isexact() or match.prefix():
3077 if match.isexact() or match.prefix():
3078 matched = set(status.modified + status.added + status.removed)
3078 matched = set(status.modified + status.added + status.removed)
3079
3079
3080 for f in match.files():
3080 for f in match.files():
3081 f = self.dirstate.normalize(f)
3081 f = self.dirstate.normalize(f)
3082 if f == b'.' or f in matched or f in wctx.substate:
3082 if f == b'.' or f in matched or f in wctx.substate:
3083 continue
3083 continue
3084 if f in status.deleted:
3084 if f in status.deleted:
3085 fail(f, _(b'file not found!'))
3085 fail(f, _(b'file not found!'))
3086 # Is it a directory that exists or used to exist?
3086 # Is it a directory that exists or used to exist?
3087 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3087 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3088 d = f + b'/'
3088 d = f + b'/'
3089 for mf in matched:
3089 for mf in matched:
3090 if mf.startswith(d):
3090 if mf.startswith(d):
3091 break
3091 break
3092 else:
3092 else:
3093 fail(f, _(b"no match under directory!"))
3093 fail(f, _(b"no match under directory!"))
3094 elif f not in self.dirstate:
3094 elif f not in self.dirstate:
3095 fail(f, _(b"file not tracked!"))
3095 fail(f, _(b"file not tracked!"))
3096
3096
3097 @unfilteredmethod
3097 @unfilteredmethod
3098 def commit(
3098 def commit(
3099 self,
3099 self,
3100 text=b"",
3100 text=b"",
3101 user=None,
3101 user=None,
3102 date=None,
3102 date=None,
3103 match=None,
3103 match=None,
3104 force=False,
3104 force=False,
3105 editor=None,
3105 editor=None,
3106 extra=None,
3106 extra=None,
3107 ):
3107 ):
3108 """Add a new revision to current repository.
3108 """Add a new revision to current repository.
3109
3109
3110 Revision information is gathered from the working directory,
3110 Revision information is gathered from the working directory,
3111 match can be used to filter the committed files. If editor is
3111 match can be used to filter the committed files. If editor is
3112 supplied, it is called to get a commit message.
3112 supplied, it is called to get a commit message.
3113 """
3113 """
3114 if extra is None:
3114 if extra is None:
3115 extra = {}
3115 extra = {}
3116
3116
3117 def fail(f, msg):
3117 def fail(f, msg):
3118 raise error.InputError(b'%s: %s' % (f, msg))
3118 raise error.InputError(b'%s: %s' % (f, msg))
3119
3119
3120 if not match:
3120 if not match:
3121 match = matchmod.always()
3121 match = matchmod.always()
3122
3122
3123 if not force:
3123 if not force:
3124 match.bad = fail
3124 match.bad = fail
3125
3125
3126 # lock() for recent changelog (see issue4368)
3126 # lock() for recent changelog (see issue4368)
3127 with self.wlock(), self.lock():
3127 with self.wlock(), self.lock():
3128 wctx = self[None]
3128 wctx = self[None]
3129 merge = len(wctx.parents()) > 1
3129 merge = len(wctx.parents()) > 1
3130
3130
3131 if not force and merge and not match.always():
3131 if not force and merge and not match.always():
3132 raise error.Abort(
3132 raise error.Abort(
3133 _(
3133 _(
3134 b'cannot partially commit a merge '
3134 b'cannot partially commit a merge '
3135 b'(do not specify files or patterns)'
3135 b'(do not specify files or patterns)'
3136 )
3136 )
3137 )
3137 )
3138
3138
3139 status = self.status(match=match, clean=force)
3139 status = self.status(match=match, clean=force)
3140 if force:
3140 if force:
3141 status.modified.extend(
3141 status.modified.extend(
3142 status.clean
3142 status.clean
3143 ) # mq may commit clean files
3143 ) # mq may commit clean files
3144
3144
3145 # check subrepos
3145 # check subrepos
3146 subs, commitsubs, newstate = subrepoutil.precommit(
3146 subs, commitsubs, newstate = subrepoutil.precommit(
3147 self.ui, wctx, status, match, force=force
3147 self.ui, wctx, status, match, force=force
3148 )
3148 )
3149
3149
3150 # make sure all explicit patterns are matched
3150 # make sure all explicit patterns are matched
3151 if not force:
3151 if not force:
3152 self.checkcommitpatterns(wctx, match, status, fail)
3152 self.checkcommitpatterns(wctx, match, status, fail)
3153
3153
3154 cctx = context.workingcommitctx(
3154 cctx = context.workingcommitctx(
3155 self, status, text, user, date, extra
3155 self, status, text, user, date, extra
3156 )
3156 )
3157
3157
3158 ms = mergestatemod.mergestate.read(self)
3158 ms = mergestatemod.mergestate.read(self)
3159 mergeutil.checkunresolved(ms)
3159 mergeutil.checkunresolved(ms)
3160
3160
3161 # internal config: ui.allowemptycommit
3161 # internal config: ui.allowemptycommit
3162 if cctx.isempty() and not self.ui.configbool(
3162 if cctx.isempty() and not self.ui.configbool(
3163 b'ui', b'allowemptycommit'
3163 b'ui', b'allowemptycommit'
3164 ):
3164 ):
3165 self.ui.debug(b'nothing to commit, clearing merge state\n')
3165 self.ui.debug(b'nothing to commit, clearing merge state\n')
3166 ms.reset()
3166 ms.reset()
3167 return None
3167 return None
3168
3168
3169 if merge and cctx.deleted():
3169 if merge and cctx.deleted():
3170 raise error.Abort(_(b"cannot commit merge with missing files"))
3170 raise error.Abort(_(b"cannot commit merge with missing files"))
3171
3171
3172 if editor:
3172 if editor:
3173 cctx._text = editor(self, cctx, subs)
3173 cctx._text = editor(self, cctx, subs)
3174 edited = text != cctx._text
3174 edited = text != cctx._text
3175
3175
3176 # Save commit message in case this transaction gets rolled back
3176 # Save commit message in case this transaction gets rolled back
3177 # (e.g. by a pretxncommit hook). Leave the content alone on
3177 # (e.g. by a pretxncommit hook). Leave the content alone on
3178 # the assumption that the user will use the same editor again.
3178 # the assumption that the user will use the same editor again.
3179 msgfn = self.savecommitmessage(cctx._text)
3179 msg_path = self.savecommitmessage(cctx._text)
3180
3180
3181 # commit subs and write new state
3181 # commit subs and write new state
3182 if subs:
3182 if subs:
3183 uipathfn = scmutil.getuipathfn(self)
3183 uipathfn = scmutil.getuipathfn(self)
3184 for s in sorted(commitsubs):
3184 for s in sorted(commitsubs):
3185 sub = wctx.sub(s)
3185 sub = wctx.sub(s)
3186 self.ui.status(
3186 self.ui.status(
3187 _(b'committing subrepository %s\n')
3187 _(b'committing subrepository %s\n')
3188 % uipathfn(subrepoutil.subrelpath(sub))
3188 % uipathfn(subrepoutil.subrelpath(sub))
3189 )
3189 )
3190 sr = sub.commit(cctx._text, user, date)
3190 sr = sub.commit(cctx._text, user, date)
3191 newstate[s] = (newstate[s][0], sr)
3191 newstate[s] = (newstate[s][0], sr)
3192 subrepoutil.writestate(self, newstate)
3192 subrepoutil.writestate(self, newstate)
3193
3193
3194 p1, p2 = self.dirstate.parents()
3194 p1, p2 = self.dirstate.parents()
3195 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3195 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3196 try:
3196 try:
3197 self.hook(
3197 self.hook(
3198 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3198 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3199 )
3199 )
3200 with self.transaction(b'commit'):
3200 with self.transaction(b'commit'):
3201 ret = self.commitctx(cctx, True)
3201 ret = self.commitctx(cctx, True)
3202 # update bookmarks, dirstate and mergestate
3202 # update bookmarks, dirstate and mergestate
3203 bookmarks.update(self, [p1, p2], ret)
3203 bookmarks.update(self, [p1, p2], ret)
3204 cctx.markcommitted(ret)
3204 cctx.markcommitted(ret)
3205 ms.reset()
3205 ms.reset()
3206 except: # re-raises
3206 except: # re-raises
3207 if edited:
3207 if edited:
3208 self.ui.write(
3208 self.ui.write(
3209 _(b'note: commit message saved in %s\n') % msgfn
3209 _(b'note: commit message saved in %s\n') % msg_path
3210 )
3210 )
3211 self.ui.write(
3211 self.ui.write(
3212 _(
3212 _(
3213 b"note: use 'hg commit --logfile "
3213 b"note: use 'hg commit --logfile "
3214 b".hg/last-message.txt --edit' to reuse it\n"
3214 b"%s --edit' to reuse it\n"
3215 )
3215 )
3216 % msg_path
3216 )
3217 )
3217 raise
3218 raise
3218
3219
3219 def commithook(unused_success):
3220 def commithook(unused_success):
3220 # hack for commands that use a temporary commit (e.g. histedit)
3221 # hack for commands that use a temporary commit (e.g. histedit)
3221 # the temporary commit got stripped before the hook could run
3222 # the temporary commit got stripped before the hook could run
3222 if self.changelog.hasnode(ret):
3223 if self.changelog.hasnode(ret):
3223 self.hook(
3224 self.hook(
3224 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3225 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3225 )
3226 )
3226
3227
3227 self._afterlock(commithook)
3228 self._afterlock(commithook)
3228 return ret
3229 return ret
3229
3230
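An illustrative call to `commit()`, assuming `repo` exists and the named file is tracked; the exact matcher narrows the commit the same way explicit patterns on the command line would.

from mercurial import match as matchmod

m = matchmod.exact([b'README'])
node = repo.commit(
    text=b'README: fix typo',
    user=b'Example Hacker <hacker@example.com>',
    match=m,
)
if node is None:
    repo.ui.status(b'nothing changed\n')
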
3230 @unfilteredmethod
3231 @unfilteredmethod
3231 def commitctx(self, ctx, error=False, origctx=None):
3232 def commitctx(self, ctx, error=False, origctx=None):
3232 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3233 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3233
3234
3234 @unfilteredmethod
3235 @unfilteredmethod
3235 def destroying(self):
3236 def destroying(self):
3236 """Inform the repository that nodes are about to be destroyed.
3237 """Inform the repository that nodes are about to be destroyed.
3237 Intended for use by strip and rollback, so there's a common
3238 Intended for use by strip and rollback, so there's a common
3238 place for anything that has to be done before destroying history.
3239 place for anything that has to be done before destroying history.
3239
3240
3240 This is mostly useful for saving state that is in memory and waiting
3241 This is mostly useful for saving state that is in memory and waiting
3241 to be flushed when the current lock is released. Because a call to
3242 to be flushed when the current lock is released. Because a call to
3242 destroyed is imminent, the repo will be invalidated causing those
3243 destroyed is imminent, the repo will be invalidated causing those
3243 changes to stay in memory (waiting for the next unlock), or vanish
3244 changes to stay in memory (waiting for the next unlock), or vanish
3244 completely.
3245 completely.
3245 """
3246 """
3246 # When using the same lock to commit and strip, the phasecache is left
3247 # When using the same lock to commit and strip, the phasecache is left
3247 # dirty after committing. Then when we strip, the repo is invalidated,
3248 # dirty after committing. Then when we strip, the repo is invalidated,
3248 # causing those changes to disappear.
3249 # causing those changes to disappear.
3249 if '_phasecache' in vars(self):
3250 if '_phasecache' in vars(self):
3250 self._phasecache.write()
3251 self._phasecache.write()
3251
3252
3252 @unfilteredmethod
3253 @unfilteredmethod
3253 def destroyed(self):
3254 def destroyed(self):
3254 """Inform the repository that nodes have been destroyed.
3255 """Inform the repository that nodes have been destroyed.
3255 Intended for use by strip and rollback, so there's a common
3256 Intended for use by strip and rollback, so there's a common
3256 place for anything that has to be done after destroying history.
3257 place for anything that has to be done after destroying history.
3257 """
3258 """
3258 # When one tries to:
3259 # When one tries to:
3259 # 1) destroy nodes thus calling this method (e.g. strip)
3260 # 1) destroy nodes thus calling this method (e.g. strip)
3260 # 2) use phasecache somewhere (e.g. commit)
3261 # 2) use phasecache somewhere (e.g. commit)
3261 #
3262 #
3262 # then 2) will fail because the phasecache contains nodes that were
3263 # then 2) will fail because the phasecache contains nodes that were
3263 # removed. We can either remove phasecache from the filecache,
3264 # removed. We can either remove phasecache from the filecache,
3264 # causing it to reload next time it is accessed, or simply filter
3265 # causing it to reload next time it is accessed, or simply filter
3265 # the removed nodes now and write the updated cache.
3266 # the removed nodes now and write the updated cache.
3266 self._phasecache.filterunknown(self)
3267 self._phasecache.filterunknown(self)
3267 self._phasecache.write()
3268 self._phasecache.write()
3268
3269
3269 # refresh all repository caches
3270 # refresh all repository caches
3270 self.updatecaches()
3271 self.updatecaches()
3271
3272
3272 # Ensure the persistent tag cache is updated. Doing it now
3273 # Ensure the persistent tag cache is updated. Doing it now
3273 # means that the tag cache only has to worry about destroyed
3274 # means that the tag cache only has to worry about destroyed
3274 # heads immediately after a strip/rollback. That in turn
3275 # heads immediately after a strip/rollback. That in turn
3275 # guarantees that "cachetip == currenttip" (comparing both rev
3276 # guarantees that "cachetip == currenttip" (comparing both rev
3276 # and node) always means no nodes have been added or destroyed.
3277 # and node) always means no nodes have been added or destroyed.
3277
3278
3278 # XXX this is suboptimal when qrefresh'ing: we strip the current
3279 # XXX this is suboptimal when qrefresh'ing: we strip the current
3279 # head, refresh the tag cache, then immediately add a new head.
3280 # head, refresh the tag cache, then immediately add a new head.
3280 # But I think doing it this way is necessary for the "instant
3281 # But I think doing it this way is necessary for the "instant
3281 # tag cache retrieval" case to work.
3282 # tag cache retrieval" case to work.
3282 self.invalidate()
3283 self.invalidate()
3283
3284
3284 def status(
3285 def status(
3285 self,
3286 self,
3286 node1=b'.',
3287 node1=b'.',
3287 node2=None,
3288 node2=None,
3288 match=None,
3289 match=None,
3289 ignored=False,
3290 ignored=False,
3290 clean=False,
3291 clean=False,
3291 unknown=False,
3292 unknown=False,
3292 listsubrepos=False,
3293 listsubrepos=False,
3293 ):
3294 ):
3294 '''a convenience method that calls node1.status(node2)'''
3295 '''a convenience method that calls node1.status(node2)'''
3295 return self[node1].status(
3296 return self[node1].status(
3296 node2, match, ignored, clean, unknown, listsubrepos
3297 node2, match, ignored, clean, unknown, listsubrepos
3297 )
3298 )
3298
3299
3299 def addpostdsstatus(self, ps):
3300 def addpostdsstatus(self, ps):
3300 """Add a callback to run within the wlock, at the point at which status
3301 """Add a callback to run within the wlock, at the point at which status
3301 fixups happen.
3302 fixups happen.
3302
3303
3303 On status completion, callback(wctx, status) will be called with the
3304 On status completion, callback(wctx, status) will be called with the
3304 wlock held, unless the dirstate has changed from underneath or the wlock
3305 wlock held, unless the dirstate has changed from underneath or the wlock
3305 couldn't be grabbed.
3306 couldn't be grabbed.
3306
3307
3307 Callbacks should not capture and use a cached copy of the dirstate --
3308 Callbacks should not capture and use a cached copy of the dirstate --
3308 it might change in the meanwhile. Instead, they should access the
3309 it might change in the meanwhile. Instead, they should access the
3309 dirstate via wctx.repo().dirstate.
3310 dirstate via wctx.repo().dirstate.
3310
3311
3311 This list is emptied out after each status run -- extensions should
3312 This list is emptied out after each status run -- extensions should
3312 make sure they add to this list each time dirstate.status is called.
3313 make sure they add to this list each time dirstate.status is called.
3313 Extensions should also make sure they don't call this for statuses
3314 Extensions should also make sure they don't call this for statuses
3314 that don't involve the dirstate.
3315 that don't involve the dirstate.
3315 """
3316 """
3316
3317
3317 # The list is located here for uniqueness reasons -- it is actually
3318 # The list is located here for uniqueness reasons -- it is actually
3318 # managed by the workingctx, but that isn't unique per-repo.
3319 # managed by the workingctx, but that isn't unique per-repo.
3319 self._postdsstatus.append(ps)
3320 self._postdsstatus.append(ps)
3320
3321
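A minimal sketch of registering a post-dirstate-status callback as the docstring above describes; `repo` and the callback body are illustrative, and the callback goes through wctx.repo().dirstate rather than a cached copy.

def _fixup(wctx, status):
    # runs with the wlock held, right after status fixups are applied
    wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))

repo.addpostdsstatus(_fixup)
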
3321 def postdsstatus(self):
3322 def postdsstatus(self):
3322 """Used by workingctx to get the list of post-dirstate-status hooks."""
3323 """Used by workingctx to get the list of post-dirstate-status hooks."""
3323 return self._postdsstatus
3324 return self._postdsstatus
3324
3325
3325 def clearpostdsstatus(self):
3326 def clearpostdsstatus(self):
3326 """Used by workingctx to clear post-dirstate-status hooks."""
3327 """Used by workingctx to clear post-dirstate-status hooks."""
3327 del self._postdsstatus[:]
3328 del self._postdsstatus[:]
3328
3329
3329 def heads(self, start=None):
3330 def heads(self, start=None):
3330 if start is None:
3331 if start is None:
3331 cl = self.changelog
3332 cl = self.changelog
3332 headrevs = reversed(cl.headrevs())
3333 headrevs = reversed(cl.headrevs())
3333 return [cl.node(rev) for rev in headrevs]
3334 return [cl.node(rev) for rev in headrevs]
3334
3335
3335 heads = self.changelog.heads(start)
3336 heads = self.changelog.heads(start)
3336 # sort the output in rev descending order
3337 # sort the output in rev descending order
3337 return sorted(heads, key=self.changelog.rev, reverse=True)
3338 return sorted(heads, key=self.changelog.rev, reverse=True)
3338
3339
3339 def branchheads(self, branch=None, start=None, closed=False):
3340 def branchheads(self, branch=None, start=None, closed=False):
3340 """return a (possibly filtered) list of heads for the given branch
3341 """return a (possibly filtered) list of heads for the given branch
3341
3342
3342 Heads are returned in topological order, from newest to oldest.
3343 Heads are returned in topological order, from newest to oldest.
3343 If branch is None, use the dirstate branch.
3344 If branch is None, use the dirstate branch.
3344 If start is not None, return only heads reachable from start.
3345 If start is not None, return only heads reachable from start.
3345 If closed is True, return heads that are marked as closed as well.
3346 If closed is True, return heads that are marked as closed as well.
3346 """
3347 """
3347 if branch is None:
3348 if branch is None:
3348 branch = self[None].branch()
3349 branch = self[None].branch()
3349 branches = self.branchmap()
3350 branches = self.branchmap()
3350 if not branches.hasbranch(branch):
3351 if not branches.hasbranch(branch):
3351 return []
3352 return []
3352 # the cache returns heads ordered lowest to highest
3353 # the cache returns heads ordered lowest to highest
3353 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3354 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3354 if start is not None:
3355 if start is not None:
3355 # filter out the heads that cannot be reached from startrev
3356 # filter out the heads that cannot be reached from startrev
3356 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3357 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3357 bheads = [h for h in bheads if h in fbheads]
3358 bheads = [h for h in bheads if h in fbheads]
3358 return bheads
3359 return bheads
3359
3360
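An illustrative query against `branchheads()`; the branch name is an assumption and `short` merely abbreviates the returned node ids.

from mercurial.node import short

for node in repo.branchheads(b'default', closed=False):
    repo.ui.write(b'%s\n' % short(node))
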
3360 def branches(self, nodes):
3361 def branches(self, nodes):
3361 if not nodes:
3362 if not nodes:
3362 nodes = [self.changelog.tip()]
3363 nodes = [self.changelog.tip()]
3363 b = []
3364 b = []
3364 for n in nodes:
3365 for n in nodes:
3365 t = n
3366 t = n
3366 while True:
3367 while True:
3367 p = self.changelog.parents(n)
3368 p = self.changelog.parents(n)
3368 if p[1] != self.nullid or p[0] == self.nullid:
3369 if p[1] != self.nullid or p[0] == self.nullid:
3369 b.append((t, n, p[0], p[1]))
3370 b.append((t, n, p[0], p[1]))
3370 break
3371 break
3371 n = p[0]
3372 n = p[0]
3372 return b
3373 return b
3373
3374
3374 def between(self, pairs):
3375 def between(self, pairs):
3375 r = []
3376 r = []
3376
3377
3377 for top, bottom in pairs:
3378 for top, bottom in pairs:
3378 n, l, i = top, [], 0
3379 n, l, i = top, [], 0
3379 f = 1
3380 f = 1
3380
3381
3381 while n != bottom and n != self.nullid:
3382 while n != bottom and n != self.nullid:
3382 p = self.changelog.parents(n)[0]
3383 p = self.changelog.parents(n)[0]
3383 if i == f:
3384 if i == f:
3384 l.append(n)
3385 l.append(n)
3385 f = f * 2
3386 f = f * 2
3386 n = p
3387 n = p
3387 i += 1
3388 i += 1
3388
3389
3389 r.append(l)
3390 r.append(l)
3390
3391
3391 return r
3392 return r
3392
3393
3393 def checkpush(self, pushop):
3394 def checkpush(self, pushop):
3394 """Extensions can override this function if additional checks have
3395 """Extensions can override this function if additional checks have
3395 to be performed before pushing, or call it if they override push
3396 to be performed before pushing, or call it if they override push
3396 command.
3397 command.
3397 """
3398 """
3398
3399
3399 @unfilteredpropertycache
3400 @unfilteredpropertycache
3400 def prepushoutgoinghooks(self):
3401 def prepushoutgoinghooks(self):
3401 """Return util.hooks consists of a pushop with repo, remote, outgoing
3402 """Return util.hooks consists of a pushop with repo, remote, outgoing
3402 methods, which are called before pushing changesets.
3403 methods, which are called before pushing changesets.
3403 """
3404 """
3404 return util.hooks()
3405 return util.hooks()
3405
3406
3406 def pushkey(self, namespace, key, old, new):
3407 def pushkey(self, namespace, key, old, new):
3407 try:
3408 try:
3408 tr = self.currenttransaction()
3409 tr = self.currenttransaction()
3409 hookargs = {}
3410 hookargs = {}
3410 if tr is not None:
3411 if tr is not None:
3411 hookargs.update(tr.hookargs)
3412 hookargs.update(tr.hookargs)
3412 hookargs = pycompat.strkwargs(hookargs)
3413 hookargs = pycompat.strkwargs(hookargs)
3413 hookargs['namespace'] = namespace
3414 hookargs['namespace'] = namespace
3414 hookargs['key'] = key
3415 hookargs['key'] = key
3415 hookargs['old'] = old
3416 hookargs['old'] = old
3416 hookargs['new'] = new
3417 hookargs['new'] = new
3417 self.hook(b'prepushkey', throw=True, **hookargs)
3418 self.hook(b'prepushkey', throw=True, **hookargs)
3418 except error.HookAbort as exc:
3419 except error.HookAbort as exc:
3419 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3420 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3420 if exc.hint:
3421 if exc.hint:
3421 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3422 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3422 return False
3423 return False
3423 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3424 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3424 ret = pushkey.push(self, namespace, key, old, new)
3425 ret = pushkey.push(self, namespace, key, old, new)
3425
3426
3426 def runhook(unused_success):
3427 def runhook(unused_success):
3427 self.hook(
3428 self.hook(
3428 b'pushkey',
3429 b'pushkey',
3429 namespace=namespace,
3430 namespace=namespace,
3430 key=key,
3431 key=key,
3431 old=old,
3432 old=old,
3432 new=new,
3433 new=new,
3433 ret=ret,
3434 ret=ret,
3434 )
3435 )
3435
3436
3436 self._afterlock(runhook)
3437 self._afterlock(runhook)
3437 return ret
3438 return ret
3438
3439
3439 def listkeys(self, namespace):
3440 def listkeys(self, namespace):
3440 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3441 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3441 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3442 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3442 values = pushkey.list(self, namespace)
3443 values = pushkey.list(self, namespace)
3443 self.hook(b'listkeys', namespace=namespace, values=values)
3444 self.hook(b'listkeys', namespace=namespace, values=values)
3444 return values
3445 return values
3445
3446
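A hedged sketch of the pushkey/listkeys pair defined above, using the 'bookmarks' namespace; the bookmark name is hypothetical and an empty old value means the key is being created.

from mercurial.node import hex

marks = repo.listkeys(b'bookmarks')  # {bookmark name: hex node}
ok = repo.pushkey(b'bookmarks', b'feature-x', b'', hex(repo[b'tip'].node()))
if not ok:
    repo.ui.warn(b'pushkey was rejected (see prepushkey hooks)\n')
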
3446 def debugwireargs(self, one, two, three=None, four=None, five=None):
3447 def debugwireargs(self, one, two, three=None, four=None, five=None):
3447 '''used to test argument passing over the wire'''
3448 '''used to test argument passing over the wire'''
3448 return b"%s %s %s %s %s" % (
3449 return b"%s %s %s %s %s" % (
3449 one,
3450 one,
3450 two,
3451 two,
3451 pycompat.bytestr(three),
3452 pycompat.bytestr(three),
3452 pycompat.bytestr(four),
3453 pycompat.bytestr(four),
3453 pycompat.bytestr(five),
3454 pycompat.bytestr(five),
3454 )
3455 )
3455
3456
3456 def savecommitmessage(self, text):
3457 def savecommitmessage(self, text):
3457 fp = self.vfs(b'last-message.txt', b'wb')
3458 fp = self.vfs(b'last-message.txt', b'wb')
3458 try:
3459 try:
3459 fp.write(text)
3460 fp.write(text)
3460 finally:
3461 finally:
3461 fp.close()
3462 fp.close()
3462 return self.pathto(fp.name[len(self.root) + 1 :])
3463 return self.pathto(fp.name[len(self.root) + 1 :])
3463
3464
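# Illustrative aside, not part of this changeset: savecommitmessage() writes
# the draft message under the repository's .hg directory and returns a path
# computed with self.pathto(), i.e. relative to the current working directory
# rather than to the repository root. A minimal sketch of the effect, assuming
# a working copy that contains a subdirectory dir/:
#
#     repo.savecommitmessage(b'draft')
#     # invoked from the root:  b'.hg/last-message.txt'
#     # invoked from dir/:      b'../.hg/last-message.txt'
#
# This is why callers should not hard-code the '.hg/last-message.txt' spelling
# when echoing the returned path back to the user (see the test changes below).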
3464 def register_wanted_sidedata(self, category):
3465 def register_wanted_sidedata(self, category):
3465 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3466 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3466 # Only revlogv2 repos can want sidedata.
3467 # Only revlogv2 repos can want sidedata.
3467 return
3468 return
3468 self._wanted_sidedata.add(pycompat.bytestr(category))
3469 self._wanted_sidedata.add(pycompat.bytestr(category))
3469
3470
3470 def register_sidedata_computer(
3471 def register_sidedata_computer(
3471 self, kind, category, keys, computer, flags, replace=False
3472 self, kind, category, keys, computer, flags, replace=False
3472 ):
3473 ):
3473 if kind not in revlogconst.ALL_KINDS:
3474 if kind not in revlogconst.ALL_KINDS:
3474 msg = _(b"unexpected revlog kind '%s'.")
3475 msg = _(b"unexpected revlog kind '%s'.")
3475 raise error.ProgrammingError(msg % kind)
3476 raise error.ProgrammingError(msg % kind)
3476 category = pycompat.bytestr(category)
3477 category = pycompat.bytestr(category)
3477 already_registered = category in self._sidedata_computers.get(kind, [])
3478 already_registered = category in self._sidedata_computers.get(kind, [])
3478 if already_registered and not replace:
3479 if already_registered and not replace:
3479 msg = _(
3480 msg = _(
3480 b"cannot register a sidedata computer twice for category '%s'."
3481 b"cannot register a sidedata computer twice for category '%s'."
3481 )
3482 )
3482 raise error.ProgrammingError(msg % category)
3483 raise error.ProgrammingError(msg % category)
3483 if replace and not already_registered:
3484 if replace and not already_registered:
3484 msg = _(
3485 msg = _(
3485 b"cannot replace a sidedata computer that isn't registered "
3486 b"cannot replace a sidedata computer that isn't registered "
3486 b"for category '%s'."
3487 b"for category '%s'."
3487 )
3488 )
3488 raise error.ProgrammingError(msg % category)
3489 raise error.ProgrammingError(msg % category)
3489 self._sidedata_computers.setdefault(kind, {})
3490 self._sidedata_computers.setdefault(kind, {})
3490 self._sidedata_computers[kind][category] = (keys, computer, flags)
3491 self._sidedata_computers[kind][category] = (keys, computer, flags)
3491
3492
3492
3493
3493 # used to avoid circular references so destructors work
3494 # used to avoid circular references so destructors work
3494 def aftertrans(files):
3495 def aftertrans(files):
3495 renamefiles = [tuple(t) for t in files]
3496 renamefiles = [tuple(t) for t in files]
3496
3497
3497 def a():
3498 def a():
3498 for vfs, src, dest in renamefiles:
3499 for vfs, src, dest in renamefiles:
3499 # if src and dest refer to the same file, vfs.rename is a no-op,
3500 # if src and dest refer to the same file, vfs.rename is a no-op,
3500 # leaving both src and dest on disk. delete dest to make sure
3501 # leaving both src and dest on disk. delete dest to make sure
3501 # the rename couldn't be such a no-op.
3502 # the rename couldn't be such a no-op.
3502 vfs.tryunlink(dest)
3503 vfs.tryunlink(dest)
3503 try:
3504 try:
3504 vfs.rename(src, dest)
3505 vfs.rename(src, dest)
3505 except OSError as exc: # journal file does not yet exist
3506 except OSError as exc: # journal file does not yet exist
3506 if exc.errno != errno.ENOENT:
3507 if exc.errno != errno.ENOENT:
3507 raise
3508 raise
3508
3509
3509 return a
3510 return a
3510
3511
3511
3512
3512 def undoname(fn):
3513 def undoname(fn):
3513 base, name = os.path.split(fn)
3514 base, name = os.path.split(fn)
3514 assert name.startswith(b'journal')
3515 assert name.startswith(b'journal')
3515 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3516
3517
3517
3518
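# Illustrative aside, not part of this changeset: aftertrans() builds the
# callback that renames the transaction journal files into their undo
# counterparts once a transaction completes, and undoname() maps a journal
# file name to the matching undo name, for example:
#
#     undoname(b'store/journal')     -> b'store/undo'
#     undoname(b'journal.dirstate')  -> b'undo.dirstate'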
3518 def instance(ui, path, create, intents=None, createopts=None):
3519 def instance(ui, path, create, intents=None, createopts=None):
3519 localpath = urlutil.urllocalpath(path)
3520 localpath = urlutil.urllocalpath(path)
3520 if create:
3521 if create:
3521 createrepository(ui, localpath, createopts=createopts)
3522 createrepository(ui, localpath, createopts=createopts)
3522
3523
3523 return makelocalrepository(ui, localpath, intents=intents)
3524 return makelocalrepository(ui, localpath, intents=intents)
3524
3525
3525
3526
3526 def islocal(path):
3527 def islocal(path):
3527 return True
3528 return True
3528
3529
3529
3530
3530 def defaultcreateopts(ui, createopts=None):
3531 def defaultcreateopts(ui, createopts=None):
3531 """Populate the default creation options for a repository.
3532 """Populate the default creation options for a repository.
3532
3533
3533 A dictionary of explicitly requested creation options can be passed
3534 A dictionary of explicitly requested creation options can be passed
3534 in. Missing keys will be populated.
3535 in. Missing keys will be populated.
3535 """
3536 """
3536 createopts = dict(createopts or {})
3537 createopts = dict(createopts or {})
3537
3538
3538 if b'backend' not in createopts:
3539 if b'backend' not in createopts:
3539 # experimental config: storage.new-repo-backend
3540 # experimental config: storage.new-repo-backend
3540 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3541 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3541
3542
3542 return createopts
3543 return createopts
3543
3544
3544
3545
3545 def clone_requirements(ui, createopts, srcrepo):
3546 def clone_requirements(ui, createopts, srcrepo):
3546 """clone the requirements of a local repo for a local clone
3547 """clone the requirements of a local repo for a local clone
3547
3548
3548 The store requirements are unchanged while the working copy requirements
3549 The store requirements are unchanged while the working copy requirements
3549 depends on the configuration
3550 depends on the configuration
3550 """
3551 """
3551 target_requirements = set()
3552 target_requirements = set()
3552 if not srcrepo.requirements:
3553 if not srcrepo.requirements:
3553 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3554 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3554 # with it.
3555 # with it.
3555 return target_requirements
3556 return target_requirements
3556 createopts = defaultcreateopts(ui, createopts=createopts)
3557 createopts = defaultcreateopts(ui, createopts=createopts)
3557 for r in newreporequirements(ui, createopts):
3558 for r in newreporequirements(ui, createopts):
3558 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3559 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3559 target_requirements.add(r)
3560 target_requirements.add(r)
3560
3561
3561 for r in srcrepo.requirements:
3562 for r in srcrepo.requirements:
3562 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3563 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3563 target_requirements.add(r)
3564 target_requirements.add(r)
3564 return target_requirements
3565 return target_requirements
3565
3566
3566
3567
3567 def newreporequirements(ui, createopts):
3568 def newreporequirements(ui, createopts):
3568 """Determine the set of requirements for a new local repository.
3569 """Determine the set of requirements for a new local repository.
3569
3570
3570 Extensions can wrap this function to specify custom requirements for
3571 Extensions can wrap this function to specify custom requirements for
3571 new repositories.
3572 new repositories.
3572 """
3573 """
3573
3574
3574 if b'backend' not in createopts:
3575 if b'backend' not in createopts:
3575 raise error.ProgrammingError(
3576 raise error.ProgrammingError(
3576 b'backend key not present in createopts; '
3577 b'backend key not present in createopts; '
3577 b'was defaultcreateopts() called?'
3578 b'was defaultcreateopts() called?'
3578 )
3579 )
3579
3580
3580 if createopts[b'backend'] != b'revlogv1':
3581 if createopts[b'backend'] != b'revlogv1':
3581 raise error.Abort(
3582 raise error.Abort(
3582 _(
3583 _(
3583 b'unable to determine repository requirements for '
3584 b'unable to determine repository requirements for '
3584 b'storage backend: %s'
3585 b'storage backend: %s'
3585 )
3586 )
3586 % createopts[b'backend']
3587 % createopts[b'backend']
3587 )
3588 )
3588
3589
3589 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3590 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3590 if ui.configbool(b'format', b'usestore'):
3591 if ui.configbool(b'format', b'usestore'):
3591 requirements.add(requirementsmod.STORE_REQUIREMENT)
3592 requirements.add(requirementsmod.STORE_REQUIREMENT)
3592 if ui.configbool(b'format', b'usefncache'):
3593 if ui.configbool(b'format', b'usefncache'):
3593 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3594 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3594 if ui.configbool(b'format', b'dotencode'):
3595 if ui.configbool(b'format', b'dotencode'):
3595 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3596 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3596
3597
3597 compengines = ui.configlist(b'format', b'revlog-compression')
3598 compengines = ui.configlist(b'format', b'revlog-compression')
3598 for compengine in compengines:
3599 for compengine in compengines:
3599 if compengine in util.compengines:
3600 if compengine in util.compengines:
3600 engine = util.compengines[compengine]
3601 engine = util.compengines[compengine]
3601 if engine.available() and engine.revlogheader():
3602 if engine.available() and engine.revlogheader():
3602 break
3603 break
3603 else:
3604 else:
3604 raise error.Abort(
3605 raise error.Abort(
3605 _(
3606 _(
3606 b'compression engines %s defined by '
3607 b'compression engines %s defined by '
3607 b'format.revlog-compression not available'
3608 b'format.revlog-compression not available'
3608 )
3609 )
3609 % b', '.join(b'"%s"' % e for e in compengines),
3610 % b', '.join(b'"%s"' % e for e in compengines),
3610 hint=_(
3611 hint=_(
3611 b'run "hg debuginstall" to list available '
3612 b'run "hg debuginstall" to list available '
3612 b'compression engines'
3613 b'compression engines'
3613 ),
3614 ),
3614 )
3615 )
3615
3616
3616 # zlib is the historical default and doesn't need an explicit requirement.
3617 # zlib is the historical default and doesn't need an explicit requirement.
3617 if compengine == b'zstd':
3618 if compengine == b'zstd':
3618 requirements.add(b'revlog-compression-zstd')
3619 requirements.add(b'revlog-compression-zstd')
3619 elif compengine != b'zlib':
3620 elif compengine != b'zlib':
3620 requirements.add(b'exp-compression-%s' % compengine)
3621 requirements.add(b'exp-compression-%s' % compengine)
3621
3622
3622 if scmutil.gdinitconfig(ui):
3623 if scmutil.gdinitconfig(ui):
3623 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3624 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3624 if ui.configbool(b'format', b'sparse-revlog'):
3625 if ui.configbool(b'format', b'sparse-revlog'):
3625 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3626 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3626
3627
3627 # experimental config: format.use-dirstate-v2
3628 # experimental config: format.use-dirstate-v2
3628 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3629 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3629 if ui.configbool(b'format', b'use-dirstate-v2'):
3630 if ui.configbool(b'format', b'use-dirstate-v2'):
3630 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3631 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3631
3632
3632 # experimental config: format.exp-use-copies-side-data-changeset
3633 # experimental config: format.exp-use-copies-side-data-changeset
3633 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3634 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3634 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3635 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3635 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3636 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3636 if ui.configbool(b'experimental', b'treemanifest'):
3637 if ui.configbool(b'experimental', b'treemanifest'):
3637 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3638 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3638
3639
3639 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3640 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3640 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3641 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3641 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3642 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3642
3643
3643 revlogv2 = ui.config(b'experimental', b'revlogv2')
3644 revlogv2 = ui.config(b'experimental', b'revlogv2')
3644 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3645 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3645 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3646 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3646 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3647 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3647 # experimental config: format.internal-phase
3648 # experimental config: format.internal-phase
3648 if ui.configbool(b'format', b'internal-phase'):
3649 if ui.configbool(b'format', b'internal-phase'):
3649 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3650 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3650
3651
3651 if createopts.get(b'narrowfiles'):
3652 if createopts.get(b'narrowfiles'):
3652 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3653 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3653
3654
3654 if createopts.get(b'lfs'):
3655 if createopts.get(b'lfs'):
3655 requirements.add(b'lfs')
3656 requirements.add(b'lfs')
3656
3657
3657 if ui.configbool(b'format', b'bookmarks-in-store'):
3658 if ui.configbool(b'format', b'bookmarks-in-store'):
3658 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3659 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3659
3660
3660 if ui.configbool(b'format', b'use-persistent-nodemap'):
3661 if ui.configbool(b'format', b'use-persistent-nodemap'):
3661 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3662 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3662
3663
3663 # if share-safe is enabled, let's create the new repository with the new
3664 # if share-safe is enabled, let's create the new repository with the new
3664 # requirement
3665 # requirement
3665 if ui.configbool(b'format', b'use-share-safe'):
3666 if ui.configbool(b'format', b'use-share-safe'):
3666 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3667 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3667
3668
3668 # if we are creating a share-repo¹ we have to handle requirement
3669 # if we are creating a share-repo¹ we have to handle requirement
3669 # differently.
3670 # differently.
3670 #
3671 #
3671 # [1] (i.e. reusing the store from another repository, just having a
3672 # [1] (i.e. reusing the store from another repository, just having a
3672 # working copy)
3673 # working copy)
3673 if b'sharedrepo' in createopts:
3674 if b'sharedrepo' in createopts:
3674 source_requirements = set(createopts[b'sharedrepo'].requirements)
3675 source_requirements = set(createopts[b'sharedrepo'].requirements)
3675
3676
3676 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3677 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3677 # share to an old school repository, we have to copy the
3678 # share to an old school repository, we have to copy the
3678 # requirements and hope for the best.
3679 # requirements and hope for the best.
3679 requirements = source_requirements
3680 requirements = source_requirements
3680 else:
3681 else:
3681 # We have control over the working copy only, so "copy" the non
3682 # We have control over the working copy only, so "copy" the non
3682 # working copy part over, ignoring previous logic.
3683 # working copy part over, ignoring previous logic.
3683 to_drop = set()
3684 to_drop = set()
3684 for req in requirements:
3685 for req in requirements:
3685 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3686 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3686 continue
3687 continue
3687 if req in source_requirements:
3688 if req in source_requirements:
3688 continue
3689 continue
3689 to_drop.add(req)
3690 to_drop.add(req)
3690 requirements -= to_drop
3691 requirements -= to_drop
3691 requirements |= source_requirements
3692 requirements |= source_requirements
3692
3693
3693 if createopts.get(b'sharedrelative'):
3694 if createopts.get(b'sharedrelative'):
3694 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3695 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3695 else:
3696 else:
3696 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3697 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3697
3698
3698 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3699 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3699 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3700 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3700 msg = _("ignoring unknown tracked key version: %d\n")
3701 msg = _("ignoring unknown tracked key version: %d\n")
3701 hint = _("see `hg help config.format.use-dirstate-tracked-hint-version")
3702 hint = _("see `hg help config.format.use-dirstate-tracked-hint-version")
3702 if version != 1:
3703 if version != 1:
3703 ui.warn(msg % version, hint=hint)
3704 ui.warn(msg % version, hint=hint)
3704 else:
3705 else:
3705 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3706 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3706
3707
3707 return requirements
3708 return requirements
3708
3709
3709
3710
3710 def checkrequirementscompat(ui, requirements):
3711 def checkrequirementscompat(ui, requirements):
3711 """Checks compatibility of repository requirements enabled and disabled.
3712 """Checks compatibility of repository requirements enabled and disabled.
3712
3713
3713 Returns a set of requirements which need to be dropped because dependent
3714 Returns a set of requirements which need to be dropped because dependent
3714 requirements are not enabled. Also warns users about it."""
3715 requirements are not enabled. Also warns users about it."""
3715
3716
3716 dropped = set()
3717 dropped = set()
3717
3718
3718 if requirementsmod.STORE_REQUIREMENT not in requirements:
3719 if requirementsmod.STORE_REQUIREMENT not in requirements:
3719 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3720 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3720 ui.warn(
3721 ui.warn(
3721 _(
3722 _(
3722 b'ignoring enabled \'format.bookmarks-in-store\' config '
3723 b'ignoring enabled \'format.bookmarks-in-store\' config '
3723 b'because it is incompatible with disabled '
3724 b'because it is incompatible with disabled '
3724 b'\'format.usestore\' config\n'
3725 b'\'format.usestore\' config\n'
3725 )
3726 )
3726 )
3727 )
3727 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3728 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3728
3729
3729 if (
3730 if (
3730 requirementsmod.SHARED_REQUIREMENT in requirements
3731 requirementsmod.SHARED_REQUIREMENT in requirements
3731 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3732 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3732 ):
3733 ):
3733 raise error.Abort(
3734 raise error.Abort(
3734 _(
3735 _(
3735 b"cannot create shared repository as source was created"
3736 b"cannot create shared repository as source was created"
3736 b" with 'format.usestore' config disabled"
3737 b" with 'format.usestore' config disabled"
3737 )
3738 )
3738 )
3739 )
3739
3740
3740 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3741 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3741 if ui.hasconfig(b'format', b'use-share-safe'):
3742 if ui.hasconfig(b'format', b'use-share-safe'):
3742 msg = _(
3743 msg = _(
3743 b"ignoring enabled 'format.use-share-safe' config because "
3744 b"ignoring enabled 'format.use-share-safe' config because "
3744 b"it is incompatible with disabled 'format.usestore'"
3745 b"it is incompatible with disabled 'format.usestore'"
3745 b" config\n"
3746 b" config\n"
3746 )
3747 )
3747 ui.warn(msg)
3748 ui.warn(msg)
3748 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3749 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3749
3750
3750 return dropped
3751 return dropped
3751
3752
3752
3753
3753 def filterknowncreateopts(ui, createopts):
3754 def filterknowncreateopts(ui, createopts):
3754 """Filters a dict of repo creation options against options that are known.
3755 """Filters a dict of repo creation options against options that are known.
3755
3756
3756 Receives a dict of repo creation options and returns a dict of those
3757 Receives a dict of repo creation options and returns a dict of those
3757 options that we don't know how to handle.
3758 options that we don't know how to handle.
3758
3759
3759 This function is called as part of repository creation. If the
3760 This function is called as part of repository creation. If the
3760 returned dict contains any items, repository creation will not
3761 returned dict contains any items, repository creation will not
3761 be allowed, as it means there was a request to create a repository
3762 be allowed, as it means there was a request to create a repository
3762 with options not recognized by loaded code.
3763 with options not recognized by loaded code.
3763
3764
3764 Extensions can wrap this function to filter out creation options
3765 Extensions can wrap this function to filter out creation options
3765 they know how to handle.
3766 they know how to handle.
3766 """
3767 """
3767 known = {
3768 known = {
3768 b'backend',
3769 b'backend',
3769 b'lfs',
3770 b'lfs',
3770 b'narrowfiles',
3771 b'narrowfiles',
3771 b'sharedrepo',
3772 b'sharedrepo',
3772 b'sharedrelative',
3773 b'sharedrelative',
3773 b'shareditems',
3774 b'shareditems',
3774 b'shallowfilestore',
3775 b'shallowfilestore',
3775 }
3776 }
3776
3777
3777 return {k: v for k, v in createopts.items() if k not in known}
3778 return {k: v for k, v in createopts.items() if k not in known}
3778
3779
3779
3780
3780 def createrepository(ui, path, createopts=None, requirements=None):
3781 def createrepository(ui, path, createopts=None, requirements=None):
3781 """Create a new repository in a vfs.
3782 """Create a new repository in a vfs.
3782
3783
3783 ``path`` path to the new repo's working directory.
3784 ``path`` path to the new repo's working directory.
3784 ``createopts`` options for the new repository.
3785 ``createopts`` options for the new repository.
3785 ``requirement`` predefined set of requirements.
3786 ``requirement`` predefined set of requirements.
3786 (incompatible with ``createopts``)
3787 (incompatible with ``createopts``)
3787
3788
3788 The following keys for ``createopts`` are recognized:
3789 The following keys for ``createopts`` are recognized:
3789
3790
3790 backend
3791 backend
3791 The storage backend to use.
3792 The storage backend to use.
3792 lfs
3793 lfs
3793 Repository will be created with ``lfs`` requirement. The lfs extension
3794 Repository will be created with ``lfs`` requirement. The lfs extension
3794 will automatically be loaded when the repository is accessed.
3795 will automatically be loaded when the repository is accessed.
3795 narrowfiles
3796 narrowfiles
3796 Set up repository to support narrow file storage.
3797 Set up repository to support narrow file storage.
3797 sharedrepo
3798 sharedrepo
3798 Repository object from which storage should be shared.
3799 Repository object from which storage should be shared.
3799 sharedrelative
3800 sharedrelative
3800 Boolean indicating if the path to the shared repo should be
3801 Boolean indicating if the path to the shared repo should be
3801 stored as relative. By default, the pointer to the "parent" repo
3802 stored as relative. By default, the pointer to the "parent" repo
3802 is stored as an absolute path.
3803 is stored as an absolute path.
3803 shareditems
3804 shareditems
3804 Set of items to share to the new repository (in addition to storage).
3805 Set of items to share to the new repository (in addition to storage).
3805 shallowfilestore
3806 shallowfilestore
3806 Indicates that storage for files should be shallow (not all ancestor
3807 Indicates that storage for files should be shallow (not all ancestor
3807 revisions are known).
3808 revisions are known).
3808 """
3809 """
3809
3810
3810 if requirements is not None:
3811 if requirements is not None:
3811 if createopts is not None:
3812 if createopts is not None:
3812 msg = b'cannot specify both createopts and requirements'
3813 msg = b'cannot specify both createopts and requirements'
3813 raise error.ProgrammingError(msg)
3814 raise error.ProgrammingError(msg)
3814 createopts = {}
3815 createopts = {}
3815 else:
3816 else:
3816 createopts = defaultcreateopts(ui, createopts=createopts)
3817 createopts = defaultcreateopts(ui, createopts=createopts)
3817
3818
3818 unknownopts = filterknowncreateopts(ui, createopts)
3819 unknownopts = filterknowncreateopts(ui, createopts)
3819
3820
3820 if not isinstance(unknownopts, dict):
3821 if not isinstance(unknownopts, dict):
3821 raise error.ProgrammingError(
3822 raise error.ProgrammingError(
3822 b'filterknowncreateopts() did not return a dict'
3823 b'filterknowncreateopts() did not return a dict'
3823 )
3824 )
3824
3825
3825 if unknownopts:
3826 if unknownopts:
3826 raise error.Abort(
3827 raise error.Abort(
3827 _(
3828 _(
3828 b'unable to create repository because of unknown '
3829 b'unable to create repository because of unknown '
3829 b'creation option: %s'
3830 b'creation option: %s'
3830 )
3831 )
3831 % b', '.join(sorted(unknownopts)),
3832 % b', '.join(sorted(unknownopts)),
3832 hint=_(b'is a required extension not loaded?'),
3833 hint=_(b'is a required extension not loaded?'),
3833 )
3834 )
3834
3835
3835 requirements = newreporequirements(ui, createopts=createopts)
3836 requirements = newreporequirements(ui, createopts=createopts)
3836 requirements -= checkrequirementscompat(ui, requirements)
3837 requirements -= checkrequirementscompat(ui, requirements)
3837
3838
3838 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3839 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3839
3840
3840 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3841 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3841 if hgvfs.exists():
3842 if hgvfs.exists():
3842 raise error.RepoError(_(b'repository %s already exists') % path)
3843 raise error.RepoError(_(b'repository %s already exists') % path)
3843
3844
3844 if b'sharedrepo' in createopts:
3845 if b'sharedrepo' in createopts:
3845 sharedpath = createopts[b'sharedrepo'].sharedpath
3846 sharedpath = createopts[b'sharedrepo'].sharedpath
3846
3847
3847 if createopts.get(b'sharedrelative'):
3848 if createopts.get(b'sharedrelative'):
3848 try:
3849 try:
3849 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3850 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3850 sharedpath = util.pconvert(sharedpath)
3851 sharedpath = util.pconvert(sharedpath)
3851 except (IOError, ValueError) as e:
3852 except (IOError, ValueError) as e:
3852 # ValueError is raised on Windows if the drive letters differ
3853 # ValueError is raised on Windows if the drive letters differ
3853 # on each path.
3854 # on each path.
3854 raise error.Abort(
3855 raise error.Abort(
3855 _(b'cannot calculate relative path'),
3856 _(b'cannot calculate relative path'),
3856 hint=stringutil.forcebytestr(e),
3857 hint=stringutil.forcebytestr(e),
3857 )
3858 )
3858
3859
3859 if not wdirvfs.exists():
3860 if not wdirvfs.exists():
3860 wdirvfs.makedirs()
3861 wdirvfs.makedirs()
3861
3862
3862 hgvfs.makedir(notindexed=True)
3863 hgvfs.makedir(notindexed=True)
3863 if b'sharedrepo' not in createopts:
3864 if b'sharedrepo' not in createopts:
3864 hgvfs.mkdir(b'cache')
3865 hgvfs.mkdir(b'cache')
3865 hgvfs.mkdir(b'wcache')
3866 hgvfs.mkdir(b'wcache')
3866
3867
3867 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3868 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3868 if has_store and b'sharedrepo' not in createopts:
3869 if has_store and b'sharedrepo' not in createopts:
3869 hgvfs.mkdir(b'store')
3870 hgvfs.mkdir(b'store')
3870
3871
3871 # We create an invalid changelog outside the store so very old
3872 # We create an invalid changelog outside the store so very old
3872 # Mercurial versions (which didn't know about the requirements
3873 # Mercurial versions (which didn't know about the requirements
3873 # file) encounter an error on reading the changelog. This
3874 # file) encounter an error on reading the changelog. This
3874 # effectively locks out old clients and prevents them from
3875 # effectively locks out old clients and prevents them from
3875 # mucking with a repo in an unknown format.
3876 # mucking with a repo in an unknown format.
3876 #
3877 #
3877 # The revlog header has version 65535, which won't be recognized by
3878 # The revlog header has version 65535, which won't be recognized by
3878 # such old clients.
3879 # such old clients.
3879 hgvfs.append(
3880 hgvfs.append(
3880 b'00changelog.i',
3881 b'00changelog.i',
3881 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3882 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3882 b'layout',
3883 b'layout',
3883 )
3884 )
3884
3885
3885 # Filter the requirements into working copy and store ones
3886 # Filter the requirements into working copy and store ones
3886 wcreq, storereq = scmutil.filterrequirements(requirements)
3887 wcreq, storereq = scmutil.filterrequirements(requirements)
3887 # write working copy ones
3888 # write working copy ones
3888 scmutil.writerequires(hgvfs, wcreq)
3889 scmutil.writerequires(hgvfs, wcreq)
3889 # If there are store requirements and the current repository
3890 # If there are store requirements and the current repository
3890 # is not a shared one, write stored requirements
3891 # is not a shared one, write stored requirements
3891 # For new shared repository, we don't need to write the store
3892 # For new shared repository, we don't need to write the store
3892 # requirements as they are already present in store requires
3893 # requirements as they are already present in store requires
3893 if storereq and b'sharedrepo' not in createopts:
3894 if storereq and b'sharedrepo' not in createopts:
3894 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3895 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3895 scmutil.writerequires(storevfs, storereq)
3896 scmutil.writerequires(storevfs, storereq)
3896
3897
3897 # Write out file telling readers where to find the shared store.
3898 # Write out file telling readers where to find the shared store.
3898 if b'sharedrepo' in createopts:
3899 if b'sharedrepo' in createopts:
3899 hgvfs.write(b'sharedpath', sharedpath)
3900 hgvfs.write(b'sharedpath', sharedpath)
3900
3901
3901 if createopts.get(b'shareditems'):
3902 if createopts.get(b'shareditems'):
3902 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3903 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3903 hgvfs.write(b'shared', shared)
3904 hgvfs.write(b'shared', shared)
3904
3905
3905
3906
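# Illustrative aside, not part of this changeset: the helpers above are used
# together roughly as follows when a repository is created from scratch
# (assuming a ui object and a filesystem path):
#
#     createopts = defaultcreateopts(ui)            # fill in missing options
#     reqs = newreporequirements(ui, createopts)    # derive format requirements
#     reqs -= checkrequirementscompat(ui, reqs)     # drop incompatible ones
#
# createrepository(ui, path) performs these steps itself, then splits the
# result with scmutil.filterrequirements() and writes the working-copy and
# store 'requires' files.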
3906 def poisonrepository(repo):
3907 def poisonrepository(repo):
3907 """Poison a repository instance so it can no longer be used."""
3908 """Poison a repository instance so it can no longer be used."""
3908 # Perform any cleanup on the instance.
3909 # Perform any cleanup on the instance.
3909 repo.close()
3910 repo.close()
3910
3911
3911 # Our strategy is to replace the type of the object with one that
3912 # Our strategy is to replace the type of the object with one that
3912 # has all attribute lookups result in error.
3913 # has all attribute lookups result in error.
3913 #
3914 #
3914 # But we have to allow the close() method because some constructors
3915 # But we have to allow the close() method because some constructors
3915 # of repos call close() on repo references.
3916 # of repos call close() on repo references.
3916 class poisonedrepository(object):
3917 class poisonedrepository(object):
3917 def __getattribute__(self, item):
3918 def __getattribute__(self, item):
3918 if item == 'close':
3919 if item == 'close':
3919 return object.__getattribute__(self, item)
3920 return object.__getattribute__(self, item)
3920
3921
3921 raise error.ProgrammingError(
3922 raise error.ProgrammingError(
3922 b'repo instances should not be used after unshare'
3923 b'repo instances should not be used after unshare'
3923 )
3924 )
3924
3925
3925 def close(self):
3926 def close(self):
3926 pass
3927 pass
3927
3928
3928 # We may have a repoview, which intercepts __setattr__. So be sure
3929 # We may have a repoview, which intercepts __setattr__. So be sure
3929 # we operate at the lowest level possible.
3930 # we operate at the lowest level possible.
3930 object.__setattr__(repo, '__class__', poisonedrepository)
3931 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,556 +1,559
1 $ . "$TESTDIR/histedit-helpers.sh"
1 $ . "$TESTDIR/histedit-helpers.sh"
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > histedit=
5 > histedit=
6 > strip=
6 > strip=
7 > mockmakedate = $TESTDIR/mockmakedate.py
7 > mockmakedate = $TESTDIR/mockmakedate.py
8 > EOF
8 > EOF
9
9
10 $ initrepo ()
10 $ initrepo ()
11 > {
11 > {
12 > hg init r
12 > hg init r
13 > cd r
13 > cd r
14 > for x in a b c d e f g; do
14 > for x in a b c d e f g; do
15 > echo $x > $x
15 > echo $x > $x
16 > hg add $x
16 > hg add $x
17 > hg ci -m $x
17 > hg ci -m $x
18 > done
18 > done
19 > }
19 > }
20
20
21 $ initrepo
21 $ initrepo
22
22
23 log before edit
23 log before edit
24 $ hg log --graph
24 $ hg log --graph
25 @ changeset: 6:3c6a8ed2ebe8
25 @ changeset: 6:3c6a8ed2ebe8
26 | tag: tip
26 | tag: tip
27 | user: test
27 | user: test
28 | date: Thu Jan 01 00:00:00 1970 +0000
28 | date: Thu Jan 01 00:00:00 1970 +0000
29 | summary: g
29 | summary: g
30 |
30 |
31 o changeset: 5:652413bf663e
31 o changeset: 5:652413bf663e
32 | user: test
32 | user: test
33 | date: Thu Jan 01 00:00:00 1970 +0000
33 | date: Thu Jan 01 00:00:00 1970 +0000
34 | summary: f
34 | summary: f
35 |
35 |
36 o changeset: 4:e860deea161a
36 o changeset: 4:e860deea161a
37 | user: test
37 | user: test
38 | date: Thu Jan 01 00:00:00 1970 +0000
38 | date: Thu Jan 01 00:00:00 1970 +0000
39 | summary: e
39 | summary: e
40 |
40 |
41 o changeset: 3:055a42cdd887
41 o changeset: 3:055a42cdd887
42 | user: test
42 | user: test
43 | date: Thu Jan 01 00:00:00 1970 +0000
43 | date: Thu Jan 01 00:00:00 1970 +0000
44 | summary: d
44 | summary: d
45 |
45 |
46 o changeset: 2:177f92b77385
46 o changeset: 2:177f92b77385
47 | user: test
47 | user: test
48 | date: Thu Jan 01 00:00:00 1970 +0000
48 | date: Thu Jan 01 00:00:00 1970 +0000
49 | summary: c
49 | summary: c
50 |
50 |
51 o changeset: 1:d2ae7f538514
51 o changeset: 1:d2ae7f538514
52 | user: test
52 | user: test
53 | date: Thu Jan 01 00:00:00 1970 +0000
53 | date: Thu Jan 01 00:00:00 1970 +0000
54 | summary: b
54 | summary: b
55 |
55 |
56 o changeset: 0:cb9a9f314b8b
56 o changeset: 0:cb9a9f314b8b
57 user: test
57 user: test
58 date: Thu Jan 01 00:00:00 1970 +0000
58 date: Thu Jan 01 00:00:00 1970 +0000
59 summary: a
59 summary: a
60
60
61 dirty a file
61 dirty a file
62 $ echo a > g
62 $ echo a > g
63 $ hg histedit 177f92b77385 --commands - 2>&1 << EOF
63 $ hg histedit 177f92b77385 --commands - 2>&1 << EOF
64 > EOF
64 > EOF
65 abort: uncommitted changes
65 abort: uncommitted changes
66 [20]
66 [20]
67 $ echo g > g
67 $ echo g > g
68
68
69 edit the history
69 edit the history
70 $ hg histedit 177f92b77385 --commands - 2>&1 << EOF| fixbundle
70 $ hg histedit 177f92b77385 --commands - 2>&1 << EOF| fixbundle
71 > pick 177f92b77385 c
71 > pick 177f92b77385 c
72 > pick 055a42cdd887 d
72 > pick 055a42cdd887 d
73 > edit e860deea161a e
73 > edit e860deea161a e
74 > pick 652413bf663e f
74 > pick 652413bf663e f
75 > pick 3c6a8ed2ebe8 g
75 > pick 3c6a8ed2ebe8 g
76 > EOF
76 > EOF
77 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
77 0 files updated, 0 files merged, 3 files removed, 0 files unresolved
78 Editing (e860deea161a), commit as needed now to split the change
78 Editing (e860deea161a), commit as needed now to split the change
79 (to edit e860deea161a, `hg histedit --continue` after making changes)
79 (to edit e860deea161a, `hg histedit --continue` after making changes)
80
80
81 try to update and get an error
81 try to update and get an error
82 $ hg update tip
82 $ hg update tip
83 abort: histedit in progress
83 abort: histedit in progress
84 (use 'hg histedit --continue' or 'hg histedit --abort')
84 (use 'hg histedit --continue' or 'hg histedit --abort')
85 [20]
85 [20]
86
86
87 edit the plan via the editor
87 edit the plan via the editor
88 $ cat >> $TESTTMP/editplan.sh <<EOF
88 $ cat >> $TESTTMP/editplan.sh <<EOF
89 > cat > \$1 <<EOF2
89 > cat > \$1 <<EOF2
90 > drop e860deea161a e
90 > drop e860deea161a e
91 > drop 652413bf663e f
91 > drop 652413bf663e f
92 > drop 3c6a8ed2ebe8 g
92 > drop 3c6a8ed2ebe8 g
93 > EOF2
93 > EOF2
94 > EOF
94 > EOF
95 $ HGEDITOR="sh $TESTTMP/editplan.sh" hg histedit --edit-plan
95 $ HGEDITOR="sh $TESTTMP/editplan.sh" hg histedit --edit-plan
96 $ cat .hg/histedit-state
96 $ cat .hg/histedit-state
97 v1
97 v1
98 055a42cdd88768532f9cf79daa407fc8d138de9b
98 055a42cdd88768532f9cf79daa407fc8d138de9b
99 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
99 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
100 False
100 False
101 3
101 3
102 drop
102 drop
103 e860deea161a2f77de56603b340ebbb4536308ae
103 e860deea161a2f77de56603b340ebbb4536308ae
104 drop
104 drop
105 652413bf663ef2a641cab26574e46d5f5a64a55a
105 652413bf663ef2a641cab26574e46d5f5a64a55a
106 drop
106 drop
107 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
107 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
108 0
108 0
109 strip-backup/177f92b77385-0ebe6a8f-histedit.hg
109 strip-backup/177f92b77385-0ebe6a8f-histedit.hg
110
110
111 edit the plan via --commands
111 edit the plan via --commands
112 $ hg histedit --edit-plan --commands - 2>&1 << EOF
112 $ hg histedit --edit-plan --commands - 2>&1 << EOF
113 > edit e860deea161a e
113 > edit e860deea161a e
114 > pick 652413bf663e f
114 > pick 652413bf663e f
115 > drop 3c6a8ed2ebe8 g
115 > drop 3c6a8ed2ebe8 g
116 > EOF
116 > EOF
117 $ cat .hg/histedit-state
117 $ cat .hg/histedit-state
118 v1
118 v1
119 055a42cdd88768532f9cf79daa407fc8d138de9b
119 055a42cdd88768532f9cf79daa407fc8d138de9b
120 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
120 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
121 False
121 False
122 3
122 3
123 edit
123 edit
124 e860deea161a2f77de56603b340ebbb4536308ae
124 e860deea161a2f77de56603b340ebbb4536308ae
125 pick
125 pick
126 652413bf663ef2a641cab26574e46d5f5a64a55a
126 652413bf663ef2a641cab26574e46d5f5a64a55a
127 drop
127 drop
128 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
128 3c6a8ed2ebe862cc949d2caa30775dd6f16fb799
129 0
129 0
130 strip-backup/177f92b77385-0ebe6a8f-histedit.hg
130 strip-backup/177f92b77385-0ebe6a8f-histedit.hg
131
131
132 Go to a random point and try to continue
132 Go to a random point and try to continue
133
133
134 $ hg id -n
134 $ hg id -n
135 3+
135 3+
136 $ hg up 0
136 $ hg up 0
137 abort: histedit in progress
137 abort: histedit in progress
138 (use 'hg histedit --continue' or 'hg histedit --abort')
138 (use 'hg histedit --continue' or 'hg histedit --abort')
139 [20]
139 [20]
140
140
141 Try to delete necessary commit
141 Try to delete necessary commit
142 $ hg strip -r 652413b
142 $ hg strip -r 652413b
143 abort: histedit in progress, can't strip 652413bf663e
143 abort: histedit in progress, can't strip 652413bf663e
144 [255]
144 [255]
145
145
146 commit, then edit the revision
146 commit, then edit the revision
147 $ hg ci -m 'wat'
147 $ hg ci -m 'wat'
148 created new head
148 created new head
149 $ echo a > e
149 $ echo a > e
150
150
151 qnew should fail while we're in the middle of the edit step
151 qnew should fail while we're in the middle of the edit step
152
152
153 $ hg --config extensions.mq= qnew please-fail
153 $ hg --config extensions.mq= qnew please-fail
154 abort: histedit in progress
154 abort: histedit in progress
155 (use 'hg histedit --continue' or 'hg histedit --abort')
155 (use 'hg histedit --continue' or 'hg histedit --abort')
156 [20]
156 [20]
157 $ HGEDITOR='echo foobaz > ' hg histedit --continue 2>&1 | fixbundle
157 $ HGEDITOR='echo foobaz > ' hg histedit --continue 2>&1 | fixbundle
158
158
159 $ hg log --graph
159 $ hg log --graph
160 @ changeset: 6:b5f70786f9b0
160 @ changeset: 6:b5f70786f9b0
161 | tag: tip
161 | tag: tip
162 | user: test
162 | user: test
163 | date: Thu Jan 01 00:00:00 1970 +0000
163 | date: Thu Jan 01 00:00:00 1970 +0000
164 | summary: f
164 | summary: f
165 |
165 |
166 o changeset: 5:a5e1ba2f7afb
166 o changeset: 5:a5e1ba2f7afb
167 | user: test
167 | user: test
168 | date: Thu Jan 01 00:00:00 1970 +0000
168 | date: Thu Jan 01 00:00:00 1970 +0000
169 | summary: foobaz
169 | summary: foobaz
170 |
170 |
171 o changeset: 4:1a60820cd1f6
171 o changeset: 4:1a60820cd1f6
172 | user: test
172 | user: test
173 | date: Thu Jan 01 00:00:00 1970 +0000
173 | date: Thu Jan 01 00:00:00 1970 +0000
174 | summary: wat
174 | summary: wat
175 |
175 |
176 o changeset: 3:055a42cdd887
176 o changeset: 3:055a42cdd887
177 | user: test
177 | user: test
178 | date: Thu Jan 01 00:00:00 1970 +0000
178 | date: Thu Jan 01 00:00:00 1970 +0000
179 | summary: d
179 | summary: d
180 |
180 |
181 o changeset: 2:177f92b77385
181 o changeset: 2:177f92b77385
182 | user: test
182 | user: test
183 | date: Thu Jan 01 00:00:00 1970 +0000
183 | date: Thu Jan 01 00:00:00 1970 +0000
184 | summary: c
184 | summary: c
185 |
185 |
186 o changeset: 1:d2ae7f538514
186 o changeset: 1:d2ae7f538514
187 | user: test
187 | user: test
188 | date: Thu Jan 01 00:00:00 1970 +0000
188 | date: Thu Jan 01 00:00:00 1970 +0000
189 | summary: b
189 | summary: b
190 |
190 |
191 o changeset: 0:cb9a9f314b8b
191 o changeset: 0:cb9a9f314b8b
192 user: test
192 user: test
193 date: Thu Jan 01 00:00:00 1970 +0000
193 date: Thu Jan 01 00:00:00 1970 +0000
194 summary: a
194 summary: a
195
195
196
196
197 $ hg cat e
197 $ hg cat e
198 a
198 a
199
199
200 Stripping necessary commits should not break --abort
200 Stripping necessary commits should not break --abort
201
201
202 $ hg histedit 1a60820cd1f6 --commands - 2>&1 << EOF| fixbundle
202 $ hg histedit 1a60820cd1f6 --commands - 2>&1 << EOF| fixbundle
203 > edit 1a60820cd1f6 wat
203 > edit 1a60820cd1f6 wat
204 > pick a5e1ba2f7afb foobaz
204 > pick a5e1ba2f7afb foobaz
205 > pick b5f70786f9b0 g
205 > pick b5f70786f9b0 g
206 > EOF
206 > EOF
207 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
207 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
208 Editing (1a60820cd1f6), commit as needed now to split the change
208 Editing (1a60820cd1f6), commit as needed now to split the change
209 (to edit 1a60820cd1f6, `hg histedit --continue` after making changes)
209 (to edit 1a60820cd1f6, `hg histedit --continue` after making changes)
210
210
211 $ mv .hg/histedit-state .hg/histedit-state.bak
211 $ mv .hg/histedit-state .hg/histedit-state.bak
212 $ hg strip -q -r b5f70786f9b0
212 $ hg strip -q -r b5f70786f9b0
213 $ mv .hg/histedit-state.bak .hg/histedit-state
213 $ mv .hg/histedit-state.bak .hg/histedit-state
214 $ hg histedit --abort
214 $ hg histedit --abort
215 adding changesets
215 adding changesets
216 adding manifests
216 adding manifests
217 adding file changes
217 adding file changes
218 added 1 changesets with 1 changes to 3 files
218 added 1 changesets with 1 changes to 3 files
219 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
219 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
220 $ hg log -r .
220 $ hg log -r .
221 changeset: 6:b5f70786f9b0
221 changeset: 6:b5f70786f9b0
222 tag: tip
222 tag: tip
223 user: test
223 user: test
224 date: Thu Jan 01 00:00:00 1970 +0000
224 date: Thu Jan 01 00:00:00 1970 +0000
225 summary: f
225 summary: f
226
226
227
227
228 check histedit_source
228 check histedit_source
229
229
230 $ hg log --debug --rev 5
230 $ hg log --debug --rev 5
231 changeset: 5:a5e1ba2f7afb899ef1581cea528fd885d2fca70d
231 changeset: 5:a5e1ba2f7afb899ef1581cea528fd885d2fca70d
232 phase: draft
232 phase: draft
233 parent: 4:1a60820cd1f6004a362aa622ebc47d59bc48eb34
233 parent: 4:1a60820cd1f6004a362aa622ebc47d59bc48eb34
234 parent: -1:0000000000000000000000000000000000000000
234 parent: -1:0000000000000000000000000000000000000000
235 manifest: 5:5ad3be8791f39117565557781f5464363b918a45
235 manifest: 5:5ad3be8791f39117565557781f5464363b918a45
236 user: test
236 user: test
237 date: Thu Jan 01 00:00:00 1970 +0000
237 date: Thu Jan 01 00:00:00 1970 +0000
238 files: e
238 files: e
239 extra: branch=default
239 extra: branch=default
240 extra: histedit_source=e860deea161a2f77de56603b340ebbb4536308ae
240 extra: histedit_source=e860deea161a2f77de56603b340ebbb4536308ae
241 description:
241 description:
242 foobaz
242 foobaz
243
243
244
244
245
245
246 $ hg histedit tip --commands - 2>&1 <<EOF| fixbundle
246 $ hg histedit tip --commands - 2>&1 <<EOF| fixbundle
247 > edit b5f70786f9b0 f
247 > edit b5f70786f9b0 f
248 > EOF
248 > EOF
249 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
249 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
250 Editing (b5f70786f9b0), commit as needed now to split the change
250 Editing (b5f70786f9b0), commit as needed now to split the change
251 (to edit b5f70786f9b0, `hg histedit --continue` after making changes)
251 (to edit b5f70786f9b0, `hg histedit --continue` after making changes)
252 $ hg status
252 $ hg status
253 A f
253 A f
254
254
255 $ hg summary
255 $ hg summary
256 parent: 5:a5e1ba2f7afb
256 parent: 5:a5e1ba2f7afb
257 foobaz
257 foobaz
258 branch: default
258 branch: default
259 commit: 1 added (new branch head)
259 commit: 1 added (new branch head)
260 update: 1 new changesets (update)
260 update: 1 new changesets (update)
261 phases: 7 draft
261 phases: 7 draft
262 hist: 1 remaining (histedit --continue)
262 hist: 1 remaining (histedit --continue)
263
263
264 (test also that editor is invoked if histedit is continued for
264 (test also that editor is invoked if histedit is continued for
265 "edit" action)
265 "edit" action)
266
266
267 $ HGEDITOR='cat' hg histedit --continue
267 $ HGEDITOR='cat' hg histedit --continue
268 f
268 f
269
269
270
270
271 HG: Enter commit message. Lines beginning with 'HG:' are removed.
271 HG: Enter commit message. Lines beginning with 'HG:' are removed.
272 HG: Leave message empty to abort commit.
272 HG: Leave message empty to abort commit.
273 HG: --
273 HG: --
274 HG: user: test
274 HG: user: test
275 HG: branch 'default'
275 HG: branch 'default'
276 HG: added f
276 HG: added f
277 saved backup bundle to $TESTTMP/r/.hg/strip-backup/b5f70786f9b0-c28d9c86-histedit.hg
277 saved backup bundle to $TESTTMP/r/.hg/strip-backup/b5f70786f9b0-c28d9c86-histedit.hg
278
278
279 $ hg status
279 $ hg status
280
280
281 log after edit
281 log after edit
282 $ hg log --limit 1
282 $ hg log --limit 1
283 changeset: 6:a107ee126658
283 changeset: 6:a107ee126658
284 tag: tip
284 tag: tip
285 user: test
285 user: test
286 date: Thu Jan 01 00:00:00 1970 +0000
286 date: Thu Jan 01 00:00:00 1970 +0000
287 summary: f
287 summary: f
288
288
289
289
290 say we'll change the message, but don't.
290 say we'll change the message, but don't.
291 $ cat > ../edit.sh <<EOF
291 $ cat > ../edit.sh <<EOF
292 > cat "\$1" | sed s/pick/mess/ > tmp
292 > cat "\$1" | sed s/pick/mess/ > tmp
293 > mv tmp "\$1"
293 > mv tmp "\$1"
294 > EOF
294 > EOF
295 $ HGEDITOR="sh ../edit.sh" hg histedit tip 2>&1 | fixbundle
295 $ HGEDITOR="sh ../edit.sh" hg histedit tip 2>&1 | fixbundle
296 $ hg status
296 $ hg status
297 $ hg log --limit 1
297 $ hg log --limit 1
298 changeset: 6:1fd3b2fe7754
298 changeset: 6:1fd3b2fe7754
299 tag: tip
299 tag: tip
300 user: test
300 user: test
301 date: Thu Jan 01 00:00:00 1970 +0000
301 date: Thu Jan 01 00:00:00 1970 +0000
302 summary: f
302 summary: f
303
303
304
304
305 modify the message
305 modify the message
306
306
307 check saving last-message.txt, at first
307 check saving last-message.txt, at first
308
308
309 $ cat > $TESTTMP/commitfailure.py <<EOF
309 $ cat > $TESTTMP/commitfailure.py <<EOF
310 > from mercurial import error
310 > from mercurial import error
311 > def reposetup(ui, repo):
311 > def reposetup(ui, repo):
312 > class commitfailure(repo.__class__):
312 > class commitfailure(repo.__class__):
313 > def commit(self, *args, **kwargs):
313 > def commit(self, *args, **kwargs):
314 > raise error.Abort(b'emulating unexpected abort')
314 > raise error.Abort(b'emulating unexpected abort')
315 > repo.__class__ = commitfailure
315 > repo.__class__ = commitfailure
316 > EOF
316 > EOF
317 $ cat >> .hg/hgrc <<EOF
317 $ cat >> .hg/hgrc <<EOF
318 > [extensions]
318 > [extensions]
319 > # this failure occurs before editor invocation
319 > # this failure occurs before editor invocation
320 > commitfailure = $TESTTMP/commitfailure.py
320 > commitfailure = $TESTTMP/commitfailure.py
321 > EOF
321 > EOF
322
322
323 $ cat > $TESTTMP/editor.sh <<EOF
323 $ cat > $TESTTMP/editor.sh <<EOF
324 > echo "==== before editing"
324 > echo "==== before editing"
325 > cat \$1
325 > cat \$1
326 > echo "===="
326 > echo "===="
327 > echo "check saving last-message.txt" >> \$1
327 > echo "check saving last-message.txt" >> \$1
328 > EOF
328 > EOF
329
329
330 (test that editor is not invoked before transaction starting)
330 (test that editor is not invoked before transaction starting)
331
331
332 $ rm -f .hg/last-message.txt
332 $ rm -f .hg/last-message.txt
333 $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit tip --commands - 2>&1 << EOF | fixbundle
333 $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit tip --commands - 2>&1 << EOF | fixbundle
334 > mess 1fd3b2fe7754 f
334 > mess 1fd3b2fe7754 f
335 > EOF
335 > EOF
336 abort: emulating unexpected abort
336 abort: emulating unexpected abort
337 $ test -f .hg/last-message.txt
337 $ test -f .hg/last-message.txt
338 [1]
338 [1]
339
339
340 $ cat >> .hg/hgrc <<EOF
340 $ cat >> .hg/hgrc <<EOF
341 > [extensions]
341 > [extensions]
342 > commitfailure = !
342 > commitfailure = !
343 > EOF
343 > EOF
344 $ hg histedit --abort -q
344 $ hg histedit --abort -q
345
345
346 (test that editor is invoked and commit message is saved into
346 (test that editor is invoked and commit message is saved into
347 "last-message.txt")
347 "last-message.txt")
348
348
349 $ cat >> .hg/hgrc <<EOF
349 $ cat >> .hg/hgrc <<EOF
350 > [hooks]
350 > [hooks]
351 > # this failure occurs after editor invocation
351 > # this failure occurs after editor invocation
352 > pretxncommit.unexpectedabort = false
352 > pretxncommit.unexpectedabort = false
353 > EOF
353 > EOF
354
354
355 $ hg status --rev '1fd3b2fe7754^1' --rev 1fd3b2fe7754
355 $ hg status --rev '1fd3b2fe7754^1' --rev 1fd3b2fe7754
356 A f
356 A f
357
357
358 $ rm -f .hg/last-message.txt
358 $ rm -f .hg/last-message.txt
359 $ mkdir dir
360 $ cd dir
359 $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit tip --commands - 2>&1 << EOF
361 $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit tip --commands - 2>&1 << EOF
360 > mess 1fd3b2fe7754 f
362 > mess 1fd3b2fe7754 f
361 > EOF
363 > EOF
362 ==== before editing
364 ==== before editing
363 f
365 f
364
366
365
367
366 HG: Enter commit message. Lines beginning with 'HG:' are removed.
368 HG: Enter commit message. Lines beginning with 'HG:' are removed.
367 HG: Leave message empty to abort commit.
369 HG: Leave message empty to abort commit.
368 HG: --
370 HG: --
369 HG: user: test
371 HG: user: test
370 HG: branch 'default'
372 HG: branch 'default'
371 HG: added f
373 HG: added f
372 ====
374 ====
373 transaction abort!
375 transaction abort!
374 rollback completed
376 rollback completed
375 note: commit message saved in .hg/last-message.txt
377 note: commit message saved in ../.hg/last-message.txt
376 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
378 note: use 'hg commit --logfile ../.hg/last-message.txt --edit' to reuse it
377 abort: pretxncommit.unexpectedabort hook exited with status 1
379 abort: pretxncommit.unexpectedabort hook exited with status 1
378 [40]
380 [40]
381 $ cd ..
379 $ cat .hg/last-message.txt
382 $ cat .hg/last-message.txt
380 f
383 f
381
384
382
385
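(the histedit above was started from the newly created dir/ subdirectory, so
the "commit message saved in" note reports the path relative to that cwd,
../.hg/last-message.txt, instead of a hard-coded .hg/last-message.txt)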
check saving last-message.txt

(test also that editor is invoked if histedit is continued for "message"
action)

  $ HGEDITOR=cat hg histedit --continue
  f


  HG: Enter commit message. Lines beginning with 'HG:' are removed.
  HG: Leave message empty to abort commit.
  HG: --
  HG: user: test
  HG: branch 'default'
  HG: added f
  transaction abort!
  rollback completed
  note: commit message saved in .hg/last-message.txt
  note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
  abort: pretxncommit.unexpectedabort hook exited with status 1
  [40]

  $ cat >> .hg/hgrc <<EOF
  > [hooks]
  > pretxncommit.unexpectedabort =
  > EOF
  $ hg histedit --abort -q

then, check "modify the message" itself

  $ hg histedit tip --commands - 2>&1 << EOF | fixbundle
  > mess 1fd3b2fe7754 f
  > EOF
  $ hg status
  $ hg log --limit 1
  changeset: 6:62feedb1200e
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: f


rollback should not work after a histedit
  $ hg rollback
  no rollback information available
  [1]

  $ cd ..
  $ hg clone -qr0 r r0
  $ cd r0
  $ hg phase -fdr0
  $ hg histedit --commands - 0 2>&1 << EOF
  > edit cb9a9f314b8b a > $EDITED
  > EOF
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  Editing (cb9a9f314b8b), commit as needed now to split the change
  (to edit cb9a9f314b8b, `hg histedit --continue` after making changes)
  [240]
  $ HGEDITOR=true hg histedit --continue
  saved backup bundle to $TESTTMP/r0/.hg/strip-backup/cb9a9f314b8b-cc5ccb0b-histedit.hg

  $ hg log -G
  @ changeset: 0:0efcea34f18a
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: a

  $ echo foo >> b
  $ hg addr
  adding b
  $ hg ci -m 'add b'
  $ echo foo >> a
  $ hg ci -m 'extend a'
  $ hg phase --public 1
Attempting to fold a change into a public change should not work:
  $ cat > ../edit.sh <<EOF
  > cat "\$1" | sed s/pick/fold/ > tmp
  > mv tmp "\$1"
  > EOF
  $ HGEDITOR="sh ../edit.sh" hg histedit 2
  warning: histedit rules saved to: .hg/histedit-last-edit.txt
  hg: parse error: first changeset cannot use verb "fold"
  [10]
  $ cat .hg/histedit-last-edit.txt
  fold 0012be4a27ea 2 extend a

  # Edit history between 0012be4a27ea and 0012be4a27ea
  #
  # Commits are listed from least to most recent
  #
  # You can reorder changesets by reordering the lines
  #
  # Commands:
  #
  # e, edit = use commit, but allow edits before making new commit
  # m, mess = edit commit message without changing commit content
  # p, fold = use commit
  # b, base = checkout changeset and apply further changesets from there
  # d, drop = remove commit from history
  # f, fold = use commit, but combine it with the one above
  # r, roll = like fold, but discard this commit's description and date
  #

  $ cd ..

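The abort above is how folding into a public changeset is blocked in practice: `hg histedit 2` offers only the draft changeset 2 for editing (its public parent is not part of the editable range), so once the editor script rewrites every `pick` into `fold`, the first and only rule asks to combine with a commit that is not in the list, and the rules parser rejects it. A rough illustration of that first-rule check, using an invented `verify_rules` helper rather than the real histedit internals:

    def verify_rules(rules):
        # Hypothetical check: the first changeset in a histedit rules list
        # has nothing above it to combine with, so verbs that merge into
        # the previous commit are rejected before anything is rewritten.
        for i, (verb, node) in enumerate(rules):
            if i == 0 and verb in ('fold', 'roll'):
                raise ValueError('first changeset cannot use verb "%s"' % verb)

    try:
        verify_rules([('fold', '0012be4a27ea')])
    except ValueError as err:
        print(err)  # first changeset cannot use verb "fold"

As the warning says, the offending rules are kept in `.hg/histedit-last-edit.txt`, so they can be corrected by hand and replayed with `hg histedit --commands`. (The sed in edit.sh also rewrites the word `pick` inside the help comments, which is why the saved file shows `p, fold = use commit`.)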
============================================
Test update-timestamp config option in mess|
============================================

  $ addwithdate ()
  > {
  > echo $1 > $1
  > hg add $1
  > hg ci -m $1 -d "$2 0"
  > }

  $ initrepo ()
  > {
  > hg init r2
  > cd r2
  > addwithdate a 1
  > addwithdate b 2
  > addwithdate c 3
  > addwithdate d 4
  > addwithdate e 5
  > addwithdate f 6
  > }

  $ initrepo

log before edit

  $ hg log --limit 1
  changeset: 5:178e35e0ce73
  tag: tip
  user: test
  date: Thu Jan 01 00:00:06 1970 +0000
  summary: f

  $ hg histedit tip --commands - 2>&1 --config rewrite.update-timestamp=True << EOF | fixbundle
  > mess 178e35e0ce73 f
  > EOF

log after edit

  $ hg log --limit 1
  changeset: 5:98bf456d476b
  tag: tip
  user: test
  date: Thu Jan 01 00:00:00 1970 +0000
  summary: f


  $ cd ..

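With `rewrite.update-timestamp=True`, the `mess` action stamps the rewritten changeset with the current time instead of preserving the original date, which is why the summary stays `f` while the date changes from `00:00:06` to the fixed "now" used by the test environment. Rather than passing `--config` on every invocation, the option can also be enabled persistently in an hgrc file, for example:

    [rewrite]
    update-timestamp = True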
warn the user on editing tagged commits

  $ hg init issue4017
  $ cd issue4017
  $ echo > a
  $ hg ci -Am 'add a'
  adding a
  $ hg tag a
  $ hg tags
  tip 1:bd7ee4f3939b
  a 0:a8a82d372bb3
  $ hg histedit
  warning: tags associated with the given changeset will be lost after histedit.
  do you want to continue (yN)? n
  abort: histedit cancelled

  [250]
  $ cd ..