rollback: detect "parentgone" case earlier...
marmoute - r50964:81870c92 default
@@ -1,3978 +1,3979 @@
# localrepo.py - read/write repository class for mercurial
# coding: utf-8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import functools
import os
import random
import sys
import time
import weakref

from concurrent import futures
from typing import (
    Optional,
)

from .i18n import _
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    bundlecaches,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    requirements as requirementsmod,
    revlog,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
    wireprototypes,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
    urlutil,
)

from .revlogutils import (
    concurrency_checker as revlogchecker,
    constants as revlogconst,
    sidedata as sidedatamod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class changelogcache(storecache):
    """filecache for the changelog"""

    def __init__(self):
        super(changelogcache, self).__init__()
        _cachedfiles.add((b'00changelog.i', b''))
        _cachedfiles.add((b'00changelog.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00changelog.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00changelog.n'))
        return paths


class manifestlogcache(storecache):
    """filecache for the manifestlog"""

    def __init__(self):
        super(manifestlogcache, self).__init__()
        _cachedfiles.add((b'00manifest.i', b''))
        _cachedfiles.add((b'00manifest.n', b''))

    def tracked_paths(self, obj):
        paths = [self.join(obj, b'00manifest.i')]
        if obj.store.opener.options.get(b'persistent-nodemap', False):
            paths.append(self.join(obj, b'00manifest.n'))
        return paths


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    @functools.wraps(orig)
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

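# Illustrative sketch (not part of upstream localrepo.py): a repository method
# that must always run against the unfiltered repository could be declared as
# follows; the function name is hypothetical:
#
#     @unfilteredmethod
#     def _rebuildsomecache(repo):
#         # here `repo` is guaranteed to be repo.unfiltered()
#         ...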

moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor:
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

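# Illustrative sketch (not part of upstream localrepo.py): callers typically
# obtain an executor from a peer and drive it as a context manager; `peer` and
# `node` below are hypothetical names:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         e.sendcommands()
#         node = f.result()  # for a local peer the future is already resolved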

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None, path=None):
        super(localpeer, self).__init__(repo.ui, path=path)

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')

        if repo._wanted_sidedata:
            formatted = bundle2.format_remote_wanted_sidedata(repo)
            caps.add(b'exp-wanted-sidedata=' + formatted)

        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(bundlecaches.CB_MANIFEST_FILE)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self,
        source,
        heads=None,
        common=None,
        bundlecaps=None,
        remote_sidedata=None,
        **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            remote_sidedata=remote_sidedata,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    """peer extension which implements legacy methods too; used for tests with
    restricted capabilities"""

    def __init__(self, repo, path=None):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps, path=path)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

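# Illustrative sketch (not part of upstream localrepo.py): an extension that
# introduces its own requirement would typically register a hook from its own
# module along these lines (names are hypothetical):
#
#     def featuresetup(ui, features):
#         features.add(b'exp-myextension-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)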

def _getsharedvfs(hgvfs, requirements):
    """returns the vfs object pointing to root of shared source
    repo for a shared repository

    hgvfs is vfs pointing at .hg/ of current repo (shared one)
    requirements is a set of requirements of current repo (shared one)
    """
    # The ``shared`` or ``relshared`` requirements indicate the
    # store lives in the path contained in the ``.hg/sharedpath`` file.
    # This is an absolute path for ``shared`` and relative to
    # ``.hg/`` for ``relshared``.
    sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
    if requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements:
        sharedpath = util.normpath(hgvfs.join(sharedpath))

    sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

    if not sharedvfs.exists():
        raise error.RepoError(
            _(b'.hg/sharedpath points to nonexistent directory %s')
            % sharedvfs.base
        )
    return sharedvfs


def _readrequires(vfs, allowmissing):
    """reads the requires file present at the root of this vfs
    and returns a set of requirements

    If allowmissing is True, we suppress FileNotFoundError if raised"""
    # requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    read = vfs.tryread if allowmissing else vfs.read
    return set(read(b'requires').splitlines())

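# Illustrative sketch (not part of upstream localrepo.py): a typical modern
# `.hg/requires` file is just a newline-delimited list such as
#
#     dotencode
#     fncache
#     generaldelta
#     revlogv1
#     sparserevlog
#     store
#
# so `_readrequires(hgvfs, True)` would return that set of byte strings, and
# an empty set if the file is missing.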

def makelocalrepository(baseui, path: bytes, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
    # Whether this repository is a shared one or not
    shared = False
    # If this repository is shared, vfs pointing to shared repo
    sharedvfs = None

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except FileNotFoundError:
            pass
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, stringutil.forcebytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    requirements = _readrequires(hgvfs, True)
    shared = (
        requirementsmod.SHARED_REQUIREMENT in requirements
        or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
    )
    storevfs = None
    if shared:
        # This is a shared repo
        sharedvfs = _getsharedvfs(hgvfs, requirements)
        storevfs = vfsmod.vfs(sharedvfs.join(b'store'))
    else:
        storevfs = vfsmod.vfs(hgvfs.join(b'store'))

    # if .hg/requires contains the sharesafe requirement, it means
    # there exists a `.hg/store/requires` too and we should read it
    # NOTE: presence of SHARESAFE_REQUIREMENT implies that store requirement
    # is present. We never write SHARESAFE_REQUIREMENT for a repo if store
    # is not present, refer checkrequirementscompat() for that
    #
    # However, if SHARESAFE_REQUIREMENT is not present, it means that the
    # repository was shared the old way. We check the share source .hg/requires
    # for SHARESAFE_REQUIREMENT to detect whether the current repository needs
    # to be reshared
    hint = _(b"see `hg help config.format.use-share-safe` for more information")
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
        if (
            shared
            and requirementsmod.SHARESAFE_REQUIREMENT
            not in _readrequires(sharedvfs, True)
        ):
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe.warn'
            )
            mismatch_config = ui.config(
                b'share', b'safe-mismatch.source-not-safe'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-not-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'downgrade-allow',
                b'allow',
                b'downgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.downgrade_share_to_non_safe(
                    ui,
                    hgvfs,
                    sharedvfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(b"share source does not support share-safe requirement"),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-not-safe`"
                        b" set."
                    )
                    % mismatch_config,
                    hint=hint,
                )
        else:
            requirements |= _readrequires(storevfs, False)
    elif shared:
        sourcerequires = _readrequires(sharedvfs, False)
        if requirementsmod.SHARESAFE_REQUIREMENT in sourcerequires:
            mismatch_config = ui.config(b'share', b'safe-mismatch.source-safe')
            mismatch_warn = ui.configbool(
                b'share', b'safe-mismatch.source-safe.warn'
            )
            mismatch_verbose_upgrade = ui.configbool(
                b'share', b'safe-mismatch.source-safe:verbose-upgrade'
            )
            if mismatch_config in (
                b'upgrade-allow',
                b'allow',
                b'upgrade-abort',
            ):
                # prevent cyclic import localrepo -> upgrade -> localrepo
                from . import upgrade

                upgrade.upgrade_share_to_safe(
                    ui,
                    hgvfs,
                    storevfs,
                    requirements,
                    mismatch_config,
                    mismatch_warn,
                    mismatch_verbose_upgrade,
                )
            elif mismatch_config == b'abort':
                raise error.Abort(
                    _(
                        b'version mismatch: source uses share-safe'
                        b' functionality while the current share does not'
                    ),
                    hint=hint,
                )
            else:
                raise error.Abort(
                    _(
                        b"share-safe mismatch with source.\nUnrecognized"
                        b" value '%s' of `share.safe-mismatch.source-safe` set."
                    )
                    % mismatch_config,
                    hint=hint,
                )

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. If `shared` or
    # `relshared` requirements are present, this indicates current repository
    # is a share and store exists in path mentioned in `.hg/sharedpath`
    if shared:
        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
        features.add(repository.REPO_FEATURE_SHARED_STORAGE)
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    if (
        requirementsmod.REVLOGV2_REQUIREMENT in requirements
        or requirementsmod.CHANGELOGV2_REQUIREMENT in requirements
    ):
        features.add(repository.REPO_FEATURE_SIDE_DATA)
        # the revlogv2 docket introduced race condition that we need to fix
        features.discard(repository.REPO_FEATURE_STREAM_CLONE)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(
    ui,
    wdirvfs: vfsmod.vfs,
    hgvfs: vfsmod.vfs,
    requirements,
    sharedvfs: Optional[vfsmod.vfs] = None,
):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.

    sharedvfs is vfs object pointing to source repo if the current one is a
    shared one
    """
    if not rcutil.use_repo_hgrc():
        return False

    ret = False
    # first load config from shared source if we have to
    if requirementsmod.SHARESAFE_REQUIREMENT in requirements and sharedvfs:
        try:
            ui.readconfig(sharedvfs.join(b'hgrc'), root=sharedvfs.base)
            ret = True
        except IOError:
            pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    try:
        ui.readconfig(hgvfs.join(b'hgrc-not-shared'), root=wdirvfs.base)
        ret = True
    except IOError:
        pass

    return ret

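# Illustrative sketch (not part of upstream localrepo.py): an extension that
# wants to pull configuration from an extra file could wrap loadhgrc() roughly
# like this; the `hgrc-extra` file name is hypothetical:
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **kwargs):
#         ret = orig(ui, wdirvfs, hgvfs, requirements, *args, **kwargs)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             ret = True
#         except IOError:
#             pass
#         return ret
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)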
894
894
895 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
895 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
896 """Perform additional actions after .hg/hgrc is loaded.
896 """Perform additional actions after .hg/hgrc is loaded.
897
897
898 This function is called during repository loading immediately after
898 This function is called during repository loading immediately after
899 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
899 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
900
900
901 The function can be used to validate configs, automatically add
901 The function can be used to validate configs, automatically add
902 options (including extensions) based on requirements, etc.
902 options (including extensions) based on requirements, etc.
903 """
903 """
904
904
905 # Map of requirements to list of extensions to load automatically when
905 # Map of requirements to list of extensions to load automatically when
906 # requirement is present.
906 # requirement is present.
907 autoextensions = {
907 autoextensions = {
908 b'git': [b'git'],
908 b'git': [b'git'],
909 b'largefiles': [b'largefiles'],
909 b'largefiles': [b'largefiles'],
910 b'lfs': [b'lfs'],
910 b'lfs': [b'lfs'],
911 }
911 }
912
912
913 for requirement, names in sorted(autoextensions.items()):
913 for requirement, names in sorted(autoextensions.items()):
914 if requirement not in requirements:
914 if requirement not in requirements:
915 continue
915 continue
916
916
917 for name in names:
917 for name in names:
918 if not ui.hasconfig(b'extensions', name):
918 if not ui.hasconfig(b'extensions', name):
919 ui.setconfig(b'extensions', name, b'', source=b'autoload')
919 ui.setconfig(b'extensions', name, b'', source=b'autoload')
920
920
921
921
922 def gathersupportedrequirements(ui):
922 def gathersupportedrequirements(ui):
923 """Determine the complete set of recognized requirements."""
923 """Determine the complete set of recognized requirements."""
924 # Start with all requirements supported by this file.
924 # Start with all requirements supported by this file.
925 supported = set(localrepository._basesupported)
925 supported = set(localrepository._basesupported)
926
926
927 # Execute ``featuresetupfuncs`` entries if they belong to an extension
927 # Execute ``featuresetupfuncs`` entries if they belong to an extension
928 # relevant to this ui instance.
928 # relevant to this ui instance.
929 modules = {m.__name__ for n, m in extensions.extensions(ui)}
929 modules = {m.__name__ for n, m in extensions.extensions(ui)}
930
930
931 for fn in featuresetupfuncs:
931 for fn in featuresetupfuncs:
932 if fn.__module__ in modules:
932 if fn.__module__ in modules:
933 fn(ui, supported)
933 fn(ui, supported)
934
934
935 # Add derived requirements from registered compression engines.
935 # Add derived requirements from registered compression engines.
936 for name in util.compengines:
936 for name in util.compengines:
937 engine = util.compengines[name]
937 engine = util.compengines[name]
938 if engine.available() and engine.revlogheader():
938 if engine.available() and engine.revlogheader():
939 supported.add(b'exp-compression-%s' % name)
939 supported.add(b'exp-compression-%s' % name)
940 if engine.name() == b'zstd':
940 if engine.name() == b'zstd':
941 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
941 supported.add(requirementsmod.REVLOG_COMPRESSION_ZSTD)
942
942
943 return supported
943 return supported
944
944
945
945
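# Illustrative sketch (hypothetical extension code): how ``featuresetupfuncs``
# feeds the loop above. An extension registers a callback, typically with
# ``localrepo.featuresetupfuncs.add(featuresetup)`` in its setup code, and the
# callback only runs when that extension is enabled for this ``ui``.
def _example_featuresetup(ui, supported):
    # add a requirement this (hypothetical) extension knows how to handle
    supported.add(b'exp-my-extension-feature')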
946 def ensurerequirementsrecognized(requirements, supported):
946 def ensurerequirementsrecognized(requirements, supported):
947 """Validate that a set of local requirements is recognized.
947 """Validate that a set of local requirements is recognized.
948
948
949 Receives a set of requirements. Raises an ``error.RepoError`` if there
949 Receives a set of requirements. Raises an ``error.RepoError`` if there
950 exists any requirement in that set that currently loaded code doesn't
950 exists any requirement in that set that currently loaded code doesn't
951 recognize.
951 recognize.
952
952
953 Returns ``None`` if every requirement is recognized.
953 Returns ``None`` if every requirement is recognized.
954 """
954 """
955 missing = set()
955 missing = set()
956
956
957 for requirement in requirements:
957 for requirement in requirements:
958 if requirement in supported:
958 if requirement in supported:
959 continue
959 continue
960
960
961 if not requirement or not requirement[0:1].isalnum():
961 if not requirement or not requirement[0:1].isalnum():
962 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
962 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
963
963
964 missing.add(requirement)
964 missing.add(requirement)
965
965
966 if missing:
966 if missing:
967 raise error.RequirementError(
967 raise error.RequirementError(
968 _(b'repository requires features unknown to this Mercurial: %s')
968 _(b'repository requires features unknown to this Mercurial: %s')
969 % b' '.join(sorted(missing)),
969 % b' '.join(sorted(missing)),
970 hint=_(
970 hint=_(
971 b'see https://mercurial-scm.org/wiki/MissingRequirement '
971 b'see https://mercurial-scm.org/wiki/MissingRequirement '
972 b'for more information'
972 b'for more information'
973 ),
973 ),
974 )
974 )
975
975
976
976
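# Illustrative sketch (hypothetical helper): the failure mode guarded against
# above. A requirements set containing a token unknown to the loaded code
# makes ``ensurerequirementsrecognized`` raise ``error.RequirementError`` with
# a hint pointing at the MissingRequirement wiki page.
def _example_can_open(ui, repo_requirements):
    supported = gathersupportedrequirements(ui)
    try:
        ensurerequirementsrecognized(repo_requirements, supported)
    except error.RequirementError:
        return False  # this install cannot open the repository
    return True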
977 def ensurerequirementscompatible(ui, requirements):
977 def ensurerequirementscompatible(ui, requirements):
978 """Validates that a set of recognized requirements is mutually compatible.
978 """Validates that a set of recognized requirements is mutually compatible.
979
979
980 Some requirements may not be compatible with others or require
980 Some requirements may not be compatible with others or require
981 config options that aren't enabled. This function is called during
981 config options that aren't enabled. This function is called during
982 repository opening to ensure that the set of requirements needed
982 repository opening to ensure that the set of requirements needed
983 to open a repository is sane and compatible with config options.
983 to open a repository is sane and compatible with config options.
984
984
985 Extensions can monkeypatch this function to perform additional
985 Extensions can monkeypatch this function to perform additional
986 checking.
986 checking.
987
987
988 ``error.RepoError`` should be raised on failure.
988 ``error.RepoError`` should be raised on failure.
989 """
989 """
990 if (
990 if (
991 requirementsmod.SPARSE_REQUIREMENT in requirements
991 requirementsmod.SPARSE_REQUIREMENT in requirements
992 and not sparse.enabled
992 and not sparse.enabled
993 ):
993 ):
994 raise error.RepoError(
994 raise error.RepoError(
995 _(
995 _(
996 b'repository is using sparse feature but '
996 b'repository is using sparse feature but '
997 b'sparse is not enabled; enable the '
997 b'sparse is not enabled; enable the '
998 b'"sparse" extensions to access'
998 b'"sparse" extensions to access'
999 )
999 )
1000 )
1000 )
1001
1001
1002
1002
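# Illustrative sketch (hypothetical extension code): the docstring above notes
# that extensions can monkeypatch this check. A wrapper installed with
# ``extensions.wrapfunction`` could layer an extra rule on top, for example:
def _example_compat_wrapper(orig, ui, requirements):
    orig(ui, requirements)
    # ``exp-my-feature`` and ``my-ext`` are made-up names for this sketch
    if b'exp-my-feature' in requirements and not ui.configbool(b'my-ext', b'strict'):
        raise error.RepoError(_(b'exp-my-feature requires my-ext.strict=yes'))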
1003 def makestore(requirements, path, vfstype):
1003 def makestore(requirements, path, vfstype):
1004 """Construct a storage object for a repository."""
1004 """Construct a storage object for a repository."""
1005 if requirementsmod.STORE_REQUIREMENT in requirements:
1005 if requirementsmod.STORE_REQUIREMENT in requirements:
1006 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1006 if requirementsmod.FNCACHE_REQUIREMENT in requirements:
1007 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1007 dotencode = requirementsmod.DOTENCODE_REQUIREMENT in requirements
1008 return storemod.fncachestore(path, vfstype, dotencode)
1008 return storemod.fncachestore(path, vfstype, dotencode)
1009
1009
1010 return storemod.encodedstore(path, vfstype)
1010 return storemod.encodedstore(path, vfstype)
1011
1011
1012 return storemod.basicstore(path, vfstype)
1012 return storemod.basicstore(path, vfstype)
1013
1013
1014
1014
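# Illustrative sketch (hypothetical helper): how the three store flavours
# above map to requirements. Modern repositories normally carry ``store``,
# ``fncache`` and ``dotencode`` and therefore get a ``fncachestore``; the
# ``encodedstore`` and ``basicstore`` branches only matter for old formats.
def _example_pick_store(requirements, path, vfstype):
    store = makestore(requirements, path, vfstype)
    has_fncache = (
        requirementsmod.STORE_REQUIREMENT in requirements
        and requirementsmod.FNCACHE_REQUIREMENT in requirements
    )
    if has_fncache:
        assert isinstance(store, storemod.fncachestore)
    return store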
1015 def resolvestorevfsoptions(ui, requirements, features):
1015 def resolvestorevfsoptions(ui, requirements, features):
1016 """Resolve the options to pass to the store vfs opener.
1016 """Resolve the options to pass to the store vfs opener.
1017
1017
1018 The returned dict is used to influence behavior of the storage layer.
1018 The returned dict is used to influence behavior of the storage layer.
1019 """
1019 """
1020 options = {}
1020 options = {}
1021
1021
1022 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1022 if requirementsmod.TREEMANIFEST_REQUIREMENT in requirements:
1023 options[b'treemanifest'] = True
1023 options[b'treemanifest'] = True
1024
1024
1025 # experimental config: format.manifestcachesize
1025 # experimental config: format.manifestcachesize
1026 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1026 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
1027 if manifestcachesize is not None:
1027 if manifestcachesize is not None:
1028 options[b'manifestcachesize'] = manifestcachesize
1028 options[b'manifestcachesize'] = manifestcachesize
1029
1029
1030 # In the absence of another requirement superseding a revlog-related
1030 # In the absence of another requirement superseding a revlog-related
1031 # requirement, we have to assume the repo is using revlog version 0.
1031 # requirement, we have to assume the repo is using revlog version 0.
1032 # This revlog format is super old and we don't bother trying to parse
1032 # This revlog format is super old and we don't bother trying to parse
1033 # opener options for it because those options wouldn't do anything
1033 # opener options for it because those options wouldn't do anything
1034 # meaningful on such old repos.
1034 # meaningful on such old repos.
1035 if (
1035 if (
1036 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1036 requirementsmod.REVLOGV1_REQUIREMENT in requirements
1037 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1037 or requirementsmod.REVLOGV2_REQUIREMENT in requirements
1038 ):
1038 ):
1039 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1039 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
1040 else: # explicitly mark repo as using revlogv0
1040 else: # explicitly mark repo as using revlogv0
1041 options[b'revlogv0'] = True
1041 options[b'revlogv0'] = True
1042
1042
1043 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1043 if requirementsmod.COPIESSDC_REQUIREMENT in requirements:
1044 options[b'copies-storage'] = b'changeset-sidedata'
1044 options[b'copies-storage'] = b'changeset-sidedata'
1045 else:
1045 else:
1046 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1046 writecopiesto = ui.config(b'experimental', b'copies.write-to')
1047 copiesextramode = (b'changeset-only', b'compatibility')
1047 copiesextramode = (b'changeset-only', b'compatibility')
1048 if writecopiesto in copiesextramode:
1048 if writecopiesto in copiesextramode:
1049 options[b'copies-storage'] = b'extra'
1049 options[b'copies-storage'] = b'extra'
1050
1050
1051 return options
1051 return options
1052
1052
1053
1053
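# Illustrative sketch (hypothetical helper): the kind of dictionary the
# resolver above produces for a plain revlogv1 repository. Only a couple of
# keys are checked; the full contents depend on requirements and config.
def _example_store_options(ui):
    requirements = {
        requirementsmod.STORE_REQUIREMENT,
        requirementsmod.REVLOGV1_REQUIREMENT,
        requirementsmod.GENERALDELTA_REQUIREMENT,
    }
    opts = resolvestorevfsoptions(ui, requirements, features=set())
    assert opts[b'revlogv1'] and opts[b'generaldelta']
    # a repository without any revlog requirement would get b'revlogv0' instead
    return opts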
1054 def resolverevlogstorevfsoptions(ui, requirements, features):
1054 def resolverevlogstorevfsoptions(ui, requirements, features):
1055 """Resolve opener options specific to revlogs."""
1055 """Resolve opener options specific to revlogs."""
1056
1056
1057 options = {}
1057 options = {}
1058 options[b'flagprocessors'] = {}
1058 options[b'flagprocessors'] = {}
1059
1059
1060 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1060 if requirementsmod.REVLOGV1_REQUIREMENT in requirements:
1061 options[b'revlogv1'] = True
1061 options[b'revlogv1'] = True
1062 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1062 if requirementsmod.REVLOGV2_REQUIREMENT in requirements:
1063 options[b'revlogv2'] = True
1063 options[b'revlogv2'] = True
1064 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1064 if requirementsmod.CHANGELOGV2_REQUIREMENT in requirements:
1065 options[b'changelogv2'] = True
1065 options[b'changelogv2'] = True
1066 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1066 cmp_rank = ui.configbool(b'experimental', b'changelog-v2.compute-rank')
1067 options[b'changelogv2.compute-rank'] = cmp_rank
1067 options[b'changelogv2.compute-rank'] = cmp_rank
1068
1068
1069 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1069 if requirementsmod.GENERALDELTA_REQUIREMENT in requirements:
1070 options[b'generaldelta'] = True
1070 options[b'generaldelta'] = True
1071
1071
1072 # experimental config: format.chunkcachesize
1072 # experimental config: format.chunkcachesize
1073 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1073 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
1074 if chunkcachesize is not None:
1074 if chunkcachesize is not None:
1075 options[b'chunkcachesize'] = chunkcachesize
1075 options[b'chunkcachesize'] = chunkcachesize
1076
1076
1077 deltabothparents = ui.configbool(
1077 deltabothparents = ui.configbool(
1078 b'storage', b'revlog.optimize-delta-parent-choice'
1078 b'storage', b'revlog.optimize-delta-parent-choice'
1079 )
1079 )
1080 options[b'deltabothparents'] = deltabothparents
1080 options[b'deltabothparents'] = deltabothparents
1081 dps_cgds = ui.configint(
1081 dps_cgds = ui.configint(
1082 b'storage',
1082 b'storage',
1083 b'revlog.delta-parent-search.candidate-group-chunk-size',
1083 b'revlog.delta-parent-search.candidate-group-chunk-size',
1084 )
1084 )
1085 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1085 options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
1086 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1086 options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
1087
1087
1088 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1088 issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
1089 options[b'issue6528.fix-incoming'] = issue6528
1089 options[b'issue6528.fix-incoming'] = issue6528
1090
1090
1091 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1091 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
1092 lazydeltabase = False
1092 lazydeltabase = False
1093 if lazydelta:
1093 if lazydelta:
1094 lazydeltabase = ui.configbool(
1094 lazydeltabase = ui.configbool(
1095 b'storage', b'revlog.reuse-external-delta-parent'
1095 b'storage', b'revlog.reuse-external-delta-parent'
1096 )
1096 )
1097 if lazydeltabase is None:
1097 if lazydeltabase is None:
1098 lazydeltabase = not scmutil.gddeltaconfig(ui)
1098 lazydeltabase = not scmutil.gddeltaconfig(ui)
1099 options[b'lazydelta'] = lazydelta
1099 options[b'lazydelta'] = lazydelta
1100 options[b'lazydeltabase'] = lazydeltabase
1100 options[b'lazydeltabase'] = lazydeltabase
1101
1101
1102 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1102 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
1103 if 0 <= chainspan:
1103 if 0 <= chainspan:
1104 options[b'maxdeltachainspan'] = chainspan
1104 options[b'maxdeltachainspan'] = chainspan
1105
1105
1106 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1106 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
1107 if mmapindexthreshold is not None:
1107 if mmapindexthreshold is not None:
1108 options[b'mmapindexthreshold'] = mmapindexthreshold
1108 options[b'mmapindexthreshold'] = mmapindexthreshold
1109
1109
1110 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1110 withsparseread = ui.configbool(b'experimental', b'sparse-read')
1111 srdensitythres = float(
1111 srdensitythres = float(
1112 ui.config(b'experimental', b'sparse-read.density-threshold')
1112 ui.config(b'experimental', b'sparse-read.density-threshold')
1113 )
1113 )
1114 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1114 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
1115 options[b'with-sparse-read'] = withsparseread
1115 options[b'with-sparse-read'] = withsparseread
1116 options[b'sparse-read-density-threshold'] = srdensitythres
1116 options[b'sparse-read-density-threshold'] = srdensitythres
1117 options[b'sparse-read-min-gap-size'] = srmingapsize
1117 options[b'sparse-read-min-gap-size'] = srmingapsize
1118
1118
1119 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1119 sparserevlog = requirementsmod.SPARSEREVLOG_REQUIREMENT in requirements
1120 options[b'sparse-revlog'] = sparserevlog
1120 options[b'sparse-revlog'] = sparserevlog
1121 if sparserevlog:
1121 if sparserevlog:
1122 options[b'generaldelta'] = True
1122 options[b'generaldelta'] = True
1123
1123
1124 maxchainlen = None
1124 maxchainlen = None
1125 if sparserevlog:
1125 if sparserevlog:
1126 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1126 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
1127 # experimental config: format.maxchainlen
1127 # experimental config: format.maxchainlen
1128 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1128 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
1129 if maxchainlen is not None:
1129 if maxchainlen is not None:
1130 options[b'maxchainlen'] = maxchainlen
1130 options[b'maxchainlen'] = maxchainlen
1131
1131
1132 for r in requirements:
1132 for r in requirements:
1133 # we allow multiple compression engine requirements to co-exist because,
1133 # we allow multiple compression engine requirements to co-exist because,
1134 # strictly speaking, revlog seems to support mixed compression styles.
1134 # strictly speaking, revlog seems to support mixed compression styles.
1135 #
1135 #
1136 # The compression used for new entries will be "the last one"
1136 # The compression used for new entries will be "the last one"
1137 prefix = r.startswith
1137 prefix = r.startswith
1138 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1138 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
1139 options[b'compengine'] = r.split(b'-', 2)[2]
1139 options[b'compengine'] = r.split(b'-', 2)[2]
1140
1140
1141 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1141 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
1142 if options[b'zlib.level'] is not None:
1142 if options[b'zlib.level'] is not None:
1143 if not (0 <= options[b'zlib.level'] <= 9):
1143 if not (0 <= options[b'zlib.level'] <= 9):
1144 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1144 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
1145 raise error.Abort(msg % options[b'zlib.level'])
1145 raise error.Abort(msg % options[b'zlib.level'])
1146 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1146 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
1147 if options[b'zstd.level'] is not None:
1147 if options[b'zstd.level'] is not None:
1148 if not (0 <= options[b'zstd.level'] <= 22):
1148 if not (0 <= options[b'zstd.level'] <= 22):
1149 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1149 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
1150 raise error.Abort(msg % options[b'zstd.level'])
1150 raise error.Abort(msg % options[b'zstd.level'])
1151
1151
1152 if requirementsmod.NARROW_REQUIREMENT in requirements:
1152 if requirementsmod.NARROW_REQUIREMENT in requirements:
1153 options[b'enableellipsis'] = True
1153 options[b'enableellipsis'] = True
1154
1154
1155 if ui.configbool(b'experimental', b'rust.index'):
1155 if ui.configbool(b'experimental', b'rust.index'):
1156 options[b'rust.index'] = True
1156 options[b'rust.index'] = True
1157 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1157 if requirementsmod.NODEMAP_REQUIREMENT in requirements:
1158 slow_path = ui.config(
1158 slow_path = ui.config(
1159 b'storage', b'revlog.persistent-nodemap.slow-path'
1159 b'storage', b'revlog.persistent-nodemap.slow-path'
1160 )
1160 )
1161 if slow_path not in (b'allow', b'warn', b'abort'):
1161 if slow_path not in (b'allow', b'warn', b'abort'):
1162 default = ui.config_default(
1162 default = ui.config_default(
1163 b'storage', b'revlog.persistent-nodemap.slow-path'
1163 b'storage', b'revlog.persistent-nodemap.slow-path'
1164 )
1164 )
1165 msg = _(
1165 msg = _(
1166 b'unknown value for config '
1166 b'unknown value for config '
1167 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1167 b'"storage.revlog.persistent-nodemap.slow-path": "%s"\n'
1168 )
1168 )
1169 ui.warn(msg % slow_path)
1169 ui.warn(msg % slow_path)
1170 if not ui.quiet:
1170 if not ui.quiet:
1171 ui.warn(_(b'falling back to default value: %s\n') % default)
1171 ui.warn(_(b'falling back to default value: %s\n') % default)
1172 slow_path = default
1172 slow_path = default
1173
1173
1174 msg = _(
1174 msg = _(
1175 b"accessing `persistent-nodemap` repository without associated "
1175 b"accessing `persistent-nodemap` repository without associated "
1176 b"fast implementation."
1176 b"fast implementation."
1177 )
1177 )
1178 hint = _(
1178 hint = _(
1179 b"check `hg help config.format.use-persistent-nodemap` "
1179 b"check `hg help config.format.use-persistent-nodemap` "
1180 b"for details"
1180 b"for details"
1181 )
1181 )
1182 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1182 if not revlog.HAS_FAST_PERSISTENT_NODEMAP:
1183 if slow_path == b'warn':
1183 if slow_path == b'warn':
1184 msg = b"warning: " + msg + b'\n'
1184 msg = b"warning: " + msg + b'\n'
1185 ui.warn(msg)
1185 ui.warn(msg)
1186 if not ui.quiet:
1186 if not ui.quiet:
1187 hint = b'(' + hint + b')\n'
1187 hint = b'(' + hint + b')\n'
1188 ui.warn(hint)
1188 ui.warn(hint)
1189 if slow_path == b'abort':
1189 if slow_path == b'abort':
1190 raise error.Abort(msg, hint=hint)
1190 raise error.Abort(msg, hint=hint)
1191 options[b'persistent-nodemap'] = True
1191 options[b'persistent-nodemap'] = True
1192 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1192 if requirementsmod.DIRSTATE_V2_REQUIREMENT in requirements:
1193 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1193 slow_path = ui.config(b'storage', b'dirstate-v2.slow-path')
1194 if slow_path not in (b'allow', b'warn', b'abort'):
1194 if slow_path not in (b'allow', b'warn', b'abort'):
1195 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1195 default = ui.config_default(b'storage', b'dirstate-v2.slow-path')
1196 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1196 msg = _(b'unknown value for config "dirstate-v2.slow-path": "%s"\n')
1197 ui.warn(msg % slow_path)
1197 ui.warn(msg % slow_path)
1198 if not ui.quiet:
1198 if not ui.quiet:
1199 ui.warn(_(b'falling back to default value: %s\n') % default)
1199 ui.warn(_(b'falling back to default value: %s\n') % default)
1200 slow_path = default
1200 slow_path = default
1201
1201
1202 msg = _(
1202 msg = _(
1203 b"accessing `dirstate-v2` repository without associated "
1203 b"accessing `dirstate-v2` repository without associated "
1204 b"fast implementation."
1204 b"fast implementation."
1205 )
1205 )
1206 hint = _(
1206 hint = _(
1207 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1207 b"check `hg help config.format.use-dirstate-v2` " b"for details"
1208 )
1208 )
1209 if not dirstate.HAS_FAST_DIRSTATE_V2:
1209 if not dirstate.HAS_FAST_DIRSTATE_V2:
1210 if slow_path == b'warn':
1210 if slow_path == b'warn':
1211 msg = b"warning: " + msg + b'\n'
1211 msg = b"warning: " + msg + b'\n'
1212 ui.warn(msg)
1212 ui.warn(msg)
1213 if not ui.quiet:
1213 if not ui.quiet:
1214 hint = b'(' + hint + b')\n'
1214 hint = b'(' + hint + b')\n'
1215 ui.warn(hint)
1215 ui.warn(hint)
1216 if slow_path == b'abort':
1216 if slow_path == b'abort':
1217 raise error.Abort(msg, hint=hint)
1217 raise error.Abort(msg, hint=hint)
1218 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1218 if ui.configbool(b'storage', b'revlog.persistent-nodemap.mmap'):
1219 options[b'persistent-nodemap.mmap'] = True
1219 options[b'persistent-nodemap.mmap'] = True
1220 if ui.configbool(b'devel', b'persistent-nodemap'):
1220 if ui.configbool(b'devel', b'persistent-nodemap'):
1221 options[b'devel-force-nodemap'] = True
1221 options[b'devel-force-nodemap'] = True
1222
1222
1223 return options
1223 return options
1224
1224
1225
1225
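# Illustrative sketch (hypothetical helper): the requirement-name parsing done
# for compression engines above. ``revlog-compression-zstd`` selects the
# ``zstd`` engine for new revlog entries; splitting on b'-' at most twice
# keeps any dash inside the engine name intact.
def _example_engine_from_requirement(requirement):
    prefix = requirement.startswith
    if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
        return requirement.split(b'-', 2)[2]
    return b'zlib'  # the historical revlog default when nothing is declared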
1226 def makemain(**kwargs):
1226 def makemain(**kwargs):
1227 """Produce a type conforming to ``ilocalrepositorymain``."""
1227 """Produce a type conforming to ``ilocalrepositorymain``."""
1228 return localrepository
1228 return localrepository
1229
1229
1230
1230
1231 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1231 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1232 class revlogfilestorage:
1232 class revlogfilestorage:
1233 """File storage when using revlogs."""
1233 """File storage when using revlogs."""
1234
1234
1235 def file(self, path):
1235 def file(self, path):
1236 if path.startswith(b'/'):
1236 if path.startswith(b'/'):
1237 path = path[1:]
1237 path = path[1:]
1238
1238
1239 return filelog.filelog(self.svfs, path)
1239 return filelog.filelog(self.svfs, path)
1240
1240
1241
1241
1242 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1242 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1243 class revlognarrowfilestorage:
1243 class revlognarrowfilestorage:
1244 """File storage when using revlogs and narrow files."""
1244 """File storage when using revlogs and narrow files."""
1245
1245
1246 def file(self, path):
1246 def file(self, path):
1247 if path.startswith(b'/'):
1247 if path.startswith(b'/'):
1248 path = path[1:]
1248 path = path[1:]
1249
1249
1250 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1250 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
1251
1251
1252
1252
1253 def makefilestorage(requirements, features, **kwargs):
1253 def makefilestorage(requirements, features, **kwargs):
1254 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1254 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1255 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1255 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
1256 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1256 features.add(repository.REPO_FEATURE_STREAM_CLONE)
1257
1257
1258 if requirementsmod.NARROW_REQUIREMENT in requirements:
1258 if requirementsmod.NARROW_REQUIREMENT in requirements:
1259 return revlognarrowfilestorage
1259 return revlognarrowfilestorage
1260 else:
1260 else:
1261 return revlogfilestorage
1261 return revlogfilestorage
1262
1262
1263
1263
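# Illustrative sketch (hypothetical helper): the effect of the narrow
# requirement on file storage. With ``requirementsmod.NARROW_REQUIREMENT``
# present every filelog is filtered through the store narrow match, otherwise
# plain ``filelog`` objects are used; either way the features set gains the
# revlog file storage and stream clone capabilities.
def _example_file_storage(requirements):
    features = set()
    cls = makefilestorage(requirements, features)
    assert repository.REPO_FEATURE_STREAM_CLONE in features
    return cls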
1264 # List of repository interfaces and factory functions for them. Each
1264 # List of repository interfaces and factory functions for them. Each
1265 # will be called in order during ``makelocalrepository()`` to iteratively
1265 # will be called in order during ``makelocalrepository()`` to iteratively
1266 # derive the final type for a local repository instance. We capture the
1266 # derive the final type for a local repository instance. We capture the
1267 # function as a lambda so we don't hold a reference and the module-level
1267 # function as a lambda so we don't hold a reference and the module-level
1268 # functions can be wrapped.
1268 # functions can be wrapped.
1269 REPO_INTERFACES = [
1269 REPO_INTERFACES = [
1270 (repository.ilocalrepositorymain, lambda: makemain),
1270 (repository.ilocalrepositorymain, lambda: makemain),
1271 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1271 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
1272 ]
1272 ]
1273
1273
1274
1274
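# Illustrative sketch (hypothetical extension code): why the factories above
# are captured behind lambdas. ``makelocalrepository()`` resolves the lambda
# at open time, so an extension that wraps the module-level function, roughly
#
#     def wrapped(orig, requirements, features, **kwargs):
#         features.add(b'my-extra-feature')
#         return orig(requirements, features, **kwargs)
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', wrapped)
#
# is still picked up for every repository opened afterwards.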
1275 @interfaceutil.implementer(repository.ilocalrepositorymain)
1275 @interfaceutil.implementer(repository.ilocalrepositorymain)
1276 class localrepository:
1276 class localrepository:
1277 """Main class for representing local repositories.
1277 """Main class for representing local repositories.
1278
1278
1279 All local repositories are instances of this class.
1279 All local repositories are instances of this class.
1280
1280
1281 Constructed on its own, instances of this class are not usable as
1281 Constructed on its own, instances of this class are not usable as
1282 repository objects. To obtain a usable repository object, call
1282 repository objects. To obtain a usable repository object, call
1283 ``hg.repository()``, ``localrepo.instance()``, or
1283 ``hg.repository()``, ``localrepo.instance()``, or
1284 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1284 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
1285 ``instance()`` adds support for creating new repositories.
1285 ``instance()`` adds support for creating new repositories.
1286 ``hg.repository()`` adds more extension integration, including calling
1286 ``hg.repository()`` adds more extension integration, including calling
1287 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1287 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
1288 used.
1288 used.
1289 """
1289 """
1290
1290
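# Illustrative sketch (hypothetical snippet; the ``hg`` module is not imported
# here): the usual way to obtain one of these objects, per the docstring
# above, is through the higher-level helper rather than this class directly:
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui.load(), path=b'/path/to/repo')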
1291 _basesupported = {
1291 _basesupported = {
1292 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1292 requirementsmod.ARCHIVED_PHASE_REQUIREMENT,
1293 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1293 requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT,
1294 requirementsmod.CHANGELOGV2_REQUIREMENT,
1294 requirementsmod.CHANGELOGV2_REQUIREMENT,
1295 requirementsmod.COPIESSDC_REQUIREMENT,
1295 requirementsmod.COPIESSDC_REQUIREMENT,
1296 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1296 requirementsmod.DIRSTATE_TRACKED_HINT_V1,
1297 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1297 requirementsmod.DIRSTATE_V2_REQUIREMENT,
1298 requirementsmod.DOTENCODE_REQUIREMENT,
1298 requirementsmod.DOTENCODE_REQUIREMENT,
1299 requirementsmod.FNCACHE_REQUIREMENT,
1299 requirementsmod.FNCACHE_REQUIREMENT,
1300 requirementsmod.GENERALDELTA_REQUIREMENT,
1300 requirementsmod.GENERALDELTA_REQUIREMENT,
1301 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1301 requirementsmod.INTERNAL_PHASE_REQUIREMENT,
1302 requirementsmod.NODEMAP_REQUIREMENT,
1302 requirementsmod.NODEMAP_REQUIREMENT,
1303 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1303 requirementsmod.RELATIVE_SHARED_REQUIREMENT,
1304 requirementsmod.REVLOGV1_REQUIREMENT,
1304 requirementsmod.REVLOGV1_REQUIREMENT,
1305 requirementsmod.REVLOGV2_REQUIREMENT,
1305 requirementsmod.REVLOGV2_REQUIREMENT,
1306 requirementsmod.SHARED_REQUIREMENT,
1306 requirementsmod.SHARED_REQUIREMENT,
1307 requirementsmod.SHARESAFE_REQUIREMENT,
1307 requirementsmod.SHARESAFE_REQUIREMENT,
1308 requirementsmod.SPARSE_REQUIREMENT,
1308 requirementsmod.SPARSE_REQUIREMENT,
1309 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1309 requirementsmod.SPARSEREVLOG_REQUIREMENT,
1310 requirementsmod.STORE_REQUIREMENT,
1310 requirementsmod.STORE_REQUIREMENT,
1311 requirementsmod.TREEMANIFEST_REQUIREMENT,
1311 requirementsmod.TREEMANIFEST_REQUIREMENT,
1312 }
1312 }
1313
1313
1314 # list of prefixes for files which can be written without 'wlock'
1314 # list of prefixes for files which can be written without 'wlock'
1315 # Extensions should extend this list when needed
1315 # Extensions should extend this list when needed
1316 _wlockfreeprefix = {
1316 _wlockfreeprefix = {
1317 # We might consider requiring 'wlock' for the next
1317 # We might consider requiring 'wlock' for the next
1318 # two, but pretty much all the existing code assumes
1318 # two, but pretty much all the existing code assumes
1319 # wlock is not needed so we keep them excluded for
1319 # wlock is not needed so we keep them excluded for
1320 # now.
1320 # now.
1321 b'hgrc',
1321 b'hgrc',
1322 b'requires',
1322 b'requires',
1323 # XXX cache is a complicated business; someone
1323 # XXX cache is a complicated business; someone
1324 # should investigate this in depth at some point
1324 # should investigate this in depth at some point
1325 b'cache/',
1325 b'cache/',
1326 # XXX bisect was still a bit too messy at the time
1326 # XXX bisect was still a bit too messy at the time
1327 # this changeset was introduced. Someone should fix
1327 # this changeset was introduced. Someone should fix
1328 # the remaining bit and drop this line
1328 # the remaining bit and drop this line
1329 b'bisect.state',
1329 b'bisect.state',
1330 }
1330 }
1331
1331
1332 def __init__(
1332 def __init__(
1333 self,
1333 self,
1334 baseui,
1334 baseui,
1335 ui,
1335 ui,
1336 origroot: bytes,
1336 origroot: bytes,
1337 wdirvfs: vfsmod.vfs,
1337 wdirvfs: vfsmod.vfs,
1338 hgvfs: vfsmod.vfs,
1338 hgvfs: vfsmod.vfs,
1339 requirements,
1339 requirements,
1340 supportedrequirements,
1340 supportedrequirements,
1341 sharedpath: bytes,
1341 sharedpath: bytes,
1342 store,
1342 store,
1343 cachevfs: vfsmod.vfs,
1343 cachevfs: vfsmod.vfs,
1344 wcachevfs: vfsmod.vfs,
1344 wcachevfs: vfsmod.vfs,
1345 features,
1345 features,
1346 intents=None,
1346 intents=None,
1347 ):
1347 ):
1348 """Create a new local repository instance.
1348 """Create a new local repository instance.
1349
1349
1350 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1350 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1351 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1351 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1352 object.
1352 object.
1353
1353
1354 Arguments:
1354 Arguments:
1355
1355
1356 baseui
1356 baseui
1357 ``ui.ui`` instance that ``ui`` argument was based off of.
1357 ``ui.ui`` instance that ``ui`` argument was based off of.
1358
1358
1359 ui
1359 ui
1360 ``ui.ui`` instance for use by the repository.
1360 ``ui.ui`` instance for use by the repository.
1361
1361
1362 origroot
1362 origroot
1363 ``bytes`` path to working directory root of this repository.
1363 ``bytes`` path to working directory root of this repository.
1364
1364
1365 wdirvfs
1365 wdirvfs
1366 ``vfs.vfs`` rooted at the working directory.
1366 ``vfs.vfs`` rooted at the working directory.
1367
1367
1368 hgvfs
1368 hgvfs
1369 ``vfs.vfs`` rooted at .hg/
1369 ``vfs.vfs`` rooted at .hg/
1370
1370
1371 requirements
1371 requirements
1372 ``set`` of bytestrings representing repository opening requirements.
1372 ``set`` of bytestrings representing repository opening requirements.
1373
1373
1374 supportedrequirements
1374 supportedrequirements
1375 ``set`` of bytestrings representing repository requirements that we
1375 ``set`` of bytestrings representing repository requirements that we
1376 know how to open. May be a superset of ``requirements``.
1376 know how to open. May be a superset of ``requirements``.
1377
1377
1378 sharedpath
1378 sharedpath
1379 ``bytes`` defining the path to the storage base directory. Points to a
1379 ``bytes`` defining the path to the storage base directory. Points to a
1380 ``.hg/`` directory somewhere.
1380 ``.hg/`` directory somewhere.
1381
1381
1382 store
1382 store
1383 ``store.basicstore`` (or derived) instance providing access to
1383 ``store.basicstore`` (or derived) instance providing access to
1384 versioned storage.
1384 versioned storage.
1385
1385
1386 cachevfs
1386 cachevfs
1387 ``vfs.vfs`` used for cache files.
1387 ``vfs.vfs`` used for cache files.
1388
1388
1389 wcachevfs
1389 wcachevfs
1390 ``vfs.vfs`` used for cache files related to the working copy.
1390 ``vfs.vfs`` used for cache files related to the working copy.
1391
1391
1392 features
1392 features
1393 ``set`` of bytestrings defining features/capabilities of this
1393 ``set`` of bytestrings defining features/capabilities of this
1394 instance.
1394 instance.
1395
1395
1396 intents
1396 intents
1397 ``set`` of system strings indicating what this repo will be used
1397 ``set`` of system strings indicating what this repo will be used
1398 for.
1398 for.
1399 """
1399 """
1400 self.baseui = baseui
1400 self.baseui = baseui
1401 self.ui = ui
1401 self.ui = ui
1402 self.origroot = origroot
1402 self.origroot = origroot
1403 # vfs rooted at working directory.
1403 # vfs rooted at working directory.
1404 self.wvfs = wdirvfs
1404 self.wvfs = wdirvfs
1405 self.root = wdirvfs.base
1405 self.root = wdirvfs.base
1406 # vfs rooted at .hg/. Used to access most non-store paths.
1406 # vfs rooted at .hg/. Used to access most non-store paths.
1407 self.vfs = hgvfs
1407 self.vfs = hgvfs
1408 self.path = hgvfs.base
1408 self.path = hgvfs.base
1409 self.requirements = requirements
1409 self.requirements = requirements
1410 self.nodeconstants = sha1nodeconstants
1410 self.nodeconstants = sha1nodeconstants
1411 self.nullid = self.nodeconstants.nullid
1411 self.nullid = self.nodeconstants.nullid
1412 self.supported = supportedrequirements
1412 self.supported = supportedrequirements
1413 self.sharedpath = sharedpath
1413 self.sharedpath = sharedpath
1414 self.store = store
1414 self.store = store
1415 self.cachevfs = cachevfs
1415 self.cachevfs = cachevfs
1416 self.wcachevfs = wcachevfs
1416 self.wcachevfs = wcachevfs
1417 self.features = features
1417 self.features = features
1418
1418
1419 self.filtername = None
1419 self.filtername = None
1420
1420
1421 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1421 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1422 b'devel', b'check-locks'
1422 b'devel', b'check-locks'
1423 ):
1423 ):
1424 self.vfs.audit = self._getvfsward(self.vfs.audit)
1424 self.vfs.audit = self._getvfsward(self.vfs.audit)
1425 # A list of callbacks to shape the phase if no data were found.
1425 # A list of callbacks to shape the phase if no data were found.
1426 # Callbacks are in the form: func(repo, roots) --> processed root.
1426 # Callbacks are in the form: func(repo, roots) --> processed root.
1427 # This list is to be filled by extensions during repo setup
1427 # This list is to be filled by extensions during repo setup
1428 self._phasedefaults = []
1428 self._phasedefaults = []
1429
1429
1430 color.setup(self.ui)
1430 color.setup(self.ui)
1431
1431
1432 self.spath = self.store.path
1432 self.spath = self.store.path
1433 self.svfs = self.store.vfs
1433 self.svfs = self.store.vfs
1434 self.sjoin = self.store.join
1434 self.sjoin = self.store.join
1435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1435 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1436 b'devel', b'check-locks'
1436 b'devel', b'check-locks'
1437 ):
1437 ):
1438 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1438 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1439 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1439 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1440 else: # standard vfs
1440 else: # standard vfs
1441 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1441 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1442
1442
1443 self._dirstatevalidatewarned = False
1443 self._dirstatevalidatewarned = False
1444
1444
1445 self._branchcaches = branchmap.BranchMapCache()
1445 self._branchcaches = branchmap.BranchMapCache()
1446 self._revbranchcache = None
1446 self._revbranchcache = None
1447 self._filterpats = {}
1447 self._filterpats = {}
1448 self._datafilters = {}
1448 self._datafilters = {}
1449 self._transref = self._lockref = self._wlockref = None
1449 self._transref = self._lockref = self._wlockref = None
1450
1450
1451 # A cache for various files under .hg/ that tracks file changes,
1451 # A cache for various files under .hg/ that tracks file changes,
1452 # (used by the filecache decorator)
1452 # (used by the filecache decorator)
1453 #
1453 #
1454 # Maps a property name to its util.filecacheentry
1454 # Maps a property name to its util.filecacheentry
1455 self._filecache = {}
1455 self._filecache = {}
1456
1456
1457 # hold sets of revisions to be filtered
1457 # hold sets of revisions to be filtered
1458 # should be cleared when something might have changed the filter value:
1458 # should be cleared when something might have changed the filter value:
1459 # - new changesets,
1459 # - new changesets,
1460 # - phase change,
1460 # - phase change,
1461 # - new obsolescence marker,
1461 # - new obsolescence marker,
1462 # - working directory parent change,
1462 # - working directory parent change,
1463 # - bookmark changes
1463 # - bookmark changes
1464 self.filteredrevcache = {}
1464 self.filteredrevcache = {}
1465
1465
1466 # post-dirstate-status hooks
1466 # post-dirstate-status hooks
1467 self._postdsstatus = []
1467 self._postdsstatus = []
1468
1468
1469 # generic mapping between names and nodes
1469 # generic mapping between names and nodes
1470 self.names = namespaces.namespaces()
1470 self.names = namespaces.namespaces()
1471
1471
1472 # Key to signature value.
1472 # Key to signature value.
1473 self._sparsesignaturecache = {}
1473 self._sparsesignaturecache = {}
1474 # Signature to cached matcher instance.
1474 # Signature to cached matcher instance.
1475 self._sparsematchercache = {}
1475 self._sparsematchercache = {}
1476
1476
1477 self._extrafilterid = repoview.extrafilter(ui)
1477 self._extrafilterid = repoview.extrafilter(ui)
1478
1478
1479 self.filecopiesmode = None
1479 self.filecopiesmode = None
1480 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1480 if requirementsmod.COPIESSDC_REQUIREMENT in self.requirements:
1481 self.filecopiesmode = b'changeset-sidedata'
1481 self.filecopiesmode = b'changeset-sidedata'
1482
1482
1483 self._wanted_sidedata = set()
1483 self._wanted_sidedata = set()
1484 self._sidedata_computers = {}
1484 self._sidedata_computers = {}
1485 sidedatamod.set_sidedata_spec_for_repo(self)
1485 sidedatamod.set_sidedata_spec_for_repo(self)
1486
1486
1487 def _getvfsward(self, origfunc):
1487 def _getvfsward(self, origfunc):
1488 """build a ward for self.vfs"""
1488 """build a ward for self.vfs"""
1489 rref = weakref.ref(self)
1489 rref = weakref.ref(self)
1490
1490
1491 def checkvfs(path, mode=None):
1491 def checkvfs(path, mode=None):
1492 ret = origfunc(path, mode=mode)
1492 ret = origfunc(path, mode=mode)
1493 repo = rref()
1493 repo = rref()
1494 if (
1494 if (
1495 repo is None
1495 repo is None
1496 or not util.safehasattr(repo, b'_wlockref')
1496 or not util.safehasattr(repo, b'_wlockref')
1497 or not util.safehasattr(repo, b'_lockref')
1497 or not util.safehasattr(repo, b'_lockref')
1498 ):
1498 ):
1499 return
1499 return
1500 if mode in (None, b'r', b'rb'):
1500 if mode in (None, b'r', b'rb'):
1501 return
1501 return
1502 if path.startswith(repo.path):
1502 if path.startswith(repo.path):
1503 # truncate name relative to the repository (.hg)
1503 # truncate name relative to the repository (.hg)
1504 path = path[len(repo.path) + 1 :]
1504 path = path[len(repo.path) + 1 :]
1505 if path.startswith(b'cache/'):
1505 if path.startswith(b'cache/'):
1506 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1506 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1507 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1507 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1508 # path prefixes covered by 'lock'
1508 # path prefixes covered by 'lock'
1509 vfs_path_prefixes = (
1509 vfs_path_prefixes = (
1510 b'journal.',
1510 b'journal.',
1511 b'undo.',
1511 b'undo.',
1512 b'strip-backup/',
1512 b'strip-backup/',
1513 b'cache/',
1513 b'cache/',
1514 )
1514 )
1515 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1515 if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
1516 if repo._currentlock(repo._lockref) is None:
1516 if repo._currentlock(repo._lockref) is None:
1517 repo.ui.develwarn(
1517 repo.ui.develwarn(
1518 b'write with no lock: "%s"' % path,
1518 b'write with no lock: "%s"' % path,
1519 stacklevel=3,
1519 stacklevel=3,
1520 config=b'check-locks',
1520 config=b'check-locks',
1521 )
1521 )
1522 elif repo._currentlock(repo._wlockref) is None:
1522 elif repo._currentlock(repo._wlockref) is None:
1523 # rest of vfs files are covered by 'wlock'
1523 # rest of vfs files are covered by 'wlock'
1524 #
1524 #
1525 # exclude special files
1525 # exclude special files
1526 for prefix in self._wlockfreeprefix:
1526 for prefix in self._wlockfreeprefix:
1527 if path.startswith(prefix):
1527 if path.startswith(prefix):
1528 return
1528 return
1529 repo.ui.develwarn(
1529 repo.ui.develwarn(
1530 b'write with no wlock: "%s"' % path,
1530 b'write with no wlock: "%s"' % path,
1531 stacklevel=3,
1531 stacklevel=3,
1532 config=b'check-locks',
1532 config=b'check-locks',
1533 )
1533 )
1534 return ret
1534 return ret
1535
1535
1536 return checkvfs
1536 return checkvfs
1537
1537
1538 def _getsvfsward(self, origfunc):
1538 def _getsvfsward(self, origfunc):
1539 """build a ward for self.svfs"""
1539 """build a ward for self.svfs"""
1540 rref = weakref.ref(self)
1540 rref = weakref.ref(self)
1541
1541
1542 def checksvfs(path, mode=None):
1542 def checksvfs(path, mode=None):
1543 ret = origfunc(path, mode=mode)
1543 ret = origfunc(path, mode=mode)
1544 repo = rref()
1544 repo = rref()
1545 if repo is None or not util.safehasattr(repo, b'_lockref'):
1545 if repo is None or not util.safehasattr(repo, b'_lockref'):
1546 return
1546 return
1547 if mode in (None, b'r', b'rb'):
1547 if mode in (None, b'r', b'rb'):
1548 return
1548 return
1549 if path.startswith(repo.sharedpath):
1549 if path.startswith(repo.sharedpath):
1550 # truncate name relative to the repository (.hg)
1550 # truncate name relative to the repository (.hg)
1551 path = path[len(repo.sharedpath) + 1 :]
1551 path = path[len(repo.sharedpath) + 1 :]
1552 if repo._currentlock(repo._lockref) is None:
1552 if repo._currentlock(repo._lockref) is None:
1553 repo.ui.develwarn(
1553 repo.ui.develwarn(
1554 b'write with no lock: "%s"' % path, stacklevel=4
1554 b'write with no lock: "%s"' % path, stacklevel=4
1555 )
1555 )
1556 return ret
1556 return ret
1557
1557
1558 return checksvfs
1558 return checksvfs
1559
1559
1560 def close(self):
1560 def close(self):
1561 self._writecaches()
1561 self._writecaches()
1562
1562
1563 def _writecaches(self):
1563 def _writecaches(self):
1564 if self._revbranchcache:
1564 if self._revbranchcache:
1565 self._revbranchcache.write()
1565 self._revbranchcache.write()
1566
1566
1567 def _restrictcapabilities(self, caps):
1567 def _restrictcapabilities(self, caps):
1568 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1568 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1569 caps = set(caps)
1569 caps = set(caps)
1570 capsblob = bundle2.encodecaps(
1570 capsblob = bundle2.encodecaps(
1571 bundle2.getrepocaps(self, role=b'client')
1571 bundle2.getrepocaps(self, role=b'client')
1572 )
1572 )
1573 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1573 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1574 if self.ui.configbool(b'experimental', b'narrow'):
1574 if self.ui.configbool(b'experimental', b'narrow'):
1575 caps.add(wireprototypes.NARROWCAP)
1575 caps.add(wireprototypes.NARROWCAP)
1576 return caps
1576 return caps
1577
1577
1578 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1578 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1579 # self -> auditor -> self._checknested -> self
1579 # self -> auditor -> self._checknested -> self
1580
1580
1581 @property
1581 @property
1582 def auditor(self):
1582 def auditor(self):
1583 # This is only used by context.workingctx.match in order to
1583 # This is only used by context.workingctx.match in order to
1584 # detect files in subrepos.
1584 # detect files in subrepos.
1585 return pathutil.pathauditor(self.root, callback=self._checknested)
1585 return pathutil.pathauditor(self.root, callback=self._checknested)
1586
1586
1587 @property
1587 @property
1588 def nofsauditor(self):
1588 def nofsauditor(self):
1589 # This is only used by context.basectx.match in order to detect
1589 # This is only used by context.basectx.match in order to detect
1590 # files in subrepos.
1590 # files in subrepos.
1591 return pathutil.pathauditor(
1591 return pathutil.pathauditor(
1592 self.root, callback=self._checknested, realfs=False, cached=True
1592 self.root, callback=self._checknested, realfs=False, cached=True
1593 )
1593 )
1594
1594
1595 def _checknested(self, path):
1595 def _checknested(self, path):
1596 """Determine if path is a legal nested repository."""
1596 """Determine if path is a legal nested repository."""
1597 if not path.startswith(self.root):
1597 if not path.startswith(self.root):
1598 return False
1598 return False
1599 subpath = path[len(self.root) + 1 :]
1599 subpath = path[len(self.root) + 1 :]
1600 normsubpath = util.pconvert(subpath)
1600 normsubpath = util.pconvert(subpath)
1601
1601
1602 # XXX: Checking against the current working copy is wrong in
1602 # XXX: Checking against the current working copy is wrong in
1603 # the sense that it can reject things like
1603 # the sense that it can reject things like
1604 #
1604 #
1605 # $ hg cat -r 10 sub/x.txt
1605 # $ hg cat -r 10 sub/x.txt
1606 #
1606 #
1607 # if sub/ is no longer a subrepository in the working copy
1607 # if sub/ is no longer a subrepository in the working copy
1608 # parent revision.
1608 # parent revision.
1609 #
1609 #
1610 # However, it can of course also allow things that would have
1610 # However, it can of course also allow things that would have
1611 # been rejected before, such as the above cat command if sub/
1611 # been rejected before, such as the above cat command if sub/
1612 # is a subrepository now, but was a normal directory before.
1612 # is a subrepository now, but was a normal directory before.
1613 # The old path auditor would have rejected by mistake since it
1613 # The old path auditor would have rejected by mistake since it
1614 # panics when it sees sub/.hg/.
1614 # panics when it sees sub/.hg/.
1615 #
1615 #
1616 # All in all, checking against the working copy seems sensible
1616 # All in all, checking against the working copy seems sensible
1617 # since we want to prevent access to nested repositories on
1617 # since we want to prevent access to nested repositories on
1618 # the filesystem *now*.
1618 # the filesystem *now*.
1619 ctx = self[None]
1619 ctx = self[None]
1620 parts = util.splitpath(subpath)
1620 parts = util.splitpath(subpath)
1621 while parts:
1621 while parts:
1622 prefix = b'/'.join(parts)
1622 prefix = b'/'.join(parts)
1623 if prefix in ctx.substate:
1623 if prefix in ctx.substate:
1624 if prefix == normsubpath:
1624 if prefix == normsubpath:
1625 return True
1625 return True
1626 else:
1626 else:
1627 sub = ctx.sub(prefix)
1627 sub = ctx.sub(prefix)
1628 return sub.checknested(subpath[len(prefix) + 1 :])
1628 return sub.checknested(subpath[len(prefix) + 1 :])
1629 else:
1629 else:
1630 parts.pop()
1630 parts.pop()
1631 return False
1631 return False
1632
1632
1633 def peer(self, path=None):
1633 def peer(self, path=None):
1634 return localpeer(self, path=path) # not cached to avoid reference cycle
1634 return localpeer(self, path=path) # not cached to avoid reference cycle
1635
1635
1636 def unfiltered(self):
1636 def unfiltered(self):
1637 """Return unfiltered version of the repository
1637 """Return unfiltered version of the repository
1638
1638
1639 Intended to be overwritten by filtered repo."""
1639 Intended to be overwritten by filtered repo."""
1640 return self
1640 return self
1641
1641
1642 def filtered(self, name, visibilityexceptions=None):
1642 def filtered(self, name, visibilityexceptions=None):
1643 """Return a filtered version of a repository
1643 """Return a filtered version of a repository
1644
1644
1645 The `name` parameter is the identifier of the requested view. This
1645 The `name` parameter is the identifier of the requested view. This
1646 will return a repoview object set "exactly" to the specified view.
1646 will return a repoview object set "exactly" to the specified view.
1647
1647
1648 This function does not apply recursive filtering to a repository. For
1648 This function does not apply recursive filtering to a repository. For
1649 example calling `repo.filtered("served")` will return a repoview using
1649 example calling `repo.filtered("served")` will return a repoview using
1650 the "served" view, regardless of the initial view used by `repo`.
1650 the "served" view, regardless of the initial view used by `repo`.
1651
1651
1652 In other words, there is always only one level of `repoview` "filtering".
1652 In other words, there is always only one level of `repoview` "filtering".
1653 """
1653 """
1654 if self._extrafilterid is not None and b'%' not in name:
1654 if self._extrafilterid is not None and b'%' not in name:
1655 name = name + b'%' + self._extrafilterid
1655 name = name + b'%' + self._extrafilterid
1656
1656
1657 cls = repoview.newtype(self.unfiltered().__class__)
1657 cls = repoview.newtype(self.unfiltered().__class__)
1658 return cls(self, name, visibilityexceptions)
1658 return cls(self, name, visibilityexceptions)
1659
1659
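# Illustrative sketch (hypothetical snippet): because view selection is not
# recursive, the two expressions below yield equivalent "served" repoviews:
#
#     served = repo.filtered(b'served')
#     also_served = repo.filtered(b'visible').filtered(b'served')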
1660 @mixedrepostorecache(
1660 @mixedrepostorecache(
1661 (b'bookmarks', b'plain'),
1661 (b'bookmarks', b'plain'),
1662 (b'bookmarks.current', b'plain'),
1662 (b'bookmarks.current', b'plain'),
1663 (b'bookmarks', b''),
1663 (b'bookmarks', b''),
1664 (b'00changelog.i', b''),
1664 (b'00changelog.i', b''),
1665 )
1665 )
1666 def _bookmarks(self):
1666 def _bookmarks(self):
1667 # Since the multiple files involved in the transaction cannot be
1667 # Since the multiple files involved in the transaction cannot be
1668 # written atomically (with current repository format), there is a race
1668 # written atomically (with current repository format), there is a race
1669 # condition here.
1669 # condition here.
1670 #
1670 #
1671 # 1) changelog content A is read
1671 # 1) changelog content A is read
1672 # 2) an outside transaction updates the changelog to content B
1672 # 2) an outside transaction updates the changelog to content B
1673 # 3) an outside transaction updates the bookmark file, referring to content B
1673 # 3) an outside transaction updates the bookmark file, referring to content B
1674 # 4) bookmarks file content is read and filtered against changelog-A
1674 # 4) bookmarks file content is read and filtered against changelog-A
1675 #
1675 #
1676 # When this happens, bookmarks against nodes missing from A are dropped.
1676 # When this happens, bookmarks against nodes missing from A are dropped.
1677 #
1677 #
1678 # Having this happen during a read is not great, but it becomes worse
1678 # Having this happen during a read is not great, but it becomes worse
1679 # when it happens during a write because the bookmarks to the "unknown"
1679 # when it happens during a write because the bookmarks to the "unknown"
1680 # nodes will be dropped for good. However, writes happen within locks.
1680 # nodes will be dropped for good. However, writes happen within locks.
1681 # This locking makes it possible to have a race free consistent read.
1681 # This locking makes it possible to have a race free consistent read.
1682 # For this purpose, data read from disk before locking are
1682 # For this purpose, data read from disk before locking are
1683 # "invalidated" right after the locks are taken. These invalidations are
1683 # "invalidated" right after the locks are taken. These invalidations are
1684 # "light": the `filecache` mechanism keeps the data in memory and will
1684 # "light": the `filecache` mechanism keeps the data in memory and will
1685 # reuse them if the underlying files did not change. Not parsing the
1685 # reuse them if the underlying files did not change. Not parsing the
1686 # same data multiple times helps performance.
1686 # same data multiple times helps performance.
1687 #
1687 #
1688 # Unfortunately in the case described above, the files tracked by the
1688 # Unfortunately in the case described above, the files tracked by the
1689 # bookmarks file cache might not have changed, but the in-memory
1689 # bookmarks file cache might not have changed, but the in-memory
1690 # content is still "wrong" because we used an older changelog content
1690 # content is still "wrong" because we used an older changelog content
1691 # to process the on-disk data. So after locking, the changelog would be
1691 # to process the on-disk data. So after locking, the changelog would be
1692 # refreshed but `_bookmarks` would be preserved.
1692 # refreshed but `_bookmarks` would be preserved.
1693 # Adding `00changelog.i` to the list of tracked files is not
1693 # Adding `00changelog.i` to the list of tracked files is not
1694 # enough, because at the time we build the content for `_bookmarks` in
1694 # enough, because at the time we build the content for `_bookmarks` in
1695 # (4), the changelog file has already diverged from the content used
1695 # (4), the changelog file has already diverged from the content used
1696 # for loading `changelog` in (1)
1696 # for loading `changelog` in (1)
1697 #
1697 #
1698 # To prevent the issue, we force the changelog to be explicitly
1698 # To prevent the issue, we force the changelog to be explicitly
1699 # reloaded while computing `_bookmarks`. The data race can still happen
1699 # reloaded while computing `_bookmarks`. The data race can still happen
1700 # without the lock (with a narrower window), but it would no longer go
1700 # without the lock (with a narrower window), but it would no longer go
1701 # undetected during the lock time refresh.
1701 # undetected during the lock time refresh.
1702 #
1702 #
1703 # The new schedule is as follows
1703 # The new schedule is as follows
1704 #
1704 #
1705 # 1) filecache logic detects that `_bookmarks` needs to be computed
1705 # 1) filecache logic detects that `_bookmarks` needs to be computed
1706 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1706 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1707 # 3) We force `changelog` filecache to be tested
1707 # 3) We force `changelog` filecache to be tested
1708 # 4) cachestat for `changelog` are captured (for changelog)
1708 # 4) cachestat for `changelog` are captured (for changelog)
1709 # 5) `_bookmarks` is computed and cached
1709 # 5) `_bookmarks` is computed and cached
1710 #
1710 #
1711 # The step in (3) ensures we have a changelog at least as recent as the
1711 # The step in (3) ensures we have a changelog at least as recent as the
1712 # cache stat computed in (1). As a result, at locking time:
1712 # cache stat computed in (1). As a result, at locking time:
1713 # * if the changelog did not change since (1) -> we can reuse the data
1713 # * if the changelog did not change since (1) -> we can reuse the data
1714 # * otherwise -> the bookmarks get refreshed.
1714 # * otherwise -> the bookmarks get refreshed.
1715 self._refreshchangelog()
1715 self._refreshchangelog()
1716 return bookmarks.bmstore(self)
1716 return bookmarks.bmstore(self)
1717
1717
1718 def _refreshchangelog(self):
1718 def _refreshchangelog(self):
1719 """make sure the in-memory changelog matches the on-disk one"""
1719 """make sure the in-memory changelog matches the on-disk one"""
1720 if 'changelog' in vars(self) and self.currenttransaction() is None:
1720 if 'changelog' in vars(self) and self.currenttransaction() is None:
1721 del self.changelog
1721 del self.changelog
1722
1722
1723 @property
1723 @property
1724 def _activebookmark(self):
1724 def _activebookmark(self):
1725 return self._bookmarks.active
1725 return self._bookmarks.active
1726
1726
1727 # _phasesets depend on the changelog. What we need is to call
1727 # _phasesets depend on the changelog. What we need is to call
1728 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1728 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1729 # can't be easily expressed in the filecache mechanism.
1729 # can't be easily expressed in the filecache mechanism.
1730 @storecache(b'phaseroots', b'00changelog.i')
1730 @storecache(b'phaseroots', b'00changelog.i')
1731 def _phasecache(self):
1731 def _phasecache(self):
1732 return phases.phasecache(self, self._phasedefaults)
1732 return phases.phasecache(self, self._phasedefaults)
1733
1733
1734 @storecache(b'obsstore')
1734 @storecache(b'obsstore')
1735 def obsstore(self):
1735 def obsstore(self):
1736 return obsolete.makestore(self.ui, self)
1736 return obsolete.makestore(self.ui, self)
1737
1737
1738 @changelogcache()
1738 @changelogcache()
1739 def changelog(repo):
1739 def changelog(repo):
1740 # load dirstate before changelog to avoid a race, see issue6303
1740 # load dirstate before changelog to avoid a race, see issue6303
1741 repo.dirstate.prefetch_parents()
1741 repo.dirstate.prefetch_parents()
1742 return repo.store.changelog(
1742 return repo.store.changelog(
1743 txnutil.mayhavepending(repo.root),
1743 txnutil.mayhavepending(repo.root),
1744 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1744 concurrencychecker=revlogchecker.get_checker(repo.ui, b'changelog'),
1745 )
1745 )
1746
1746
1747 @manifestlogcache()
1747 @manifestlogcache()
1748 def manifestlog(self):
1748 def manifestlog(self):
1749 return self.store.manifestlog(self, self._storenarrowmatch)
1749 return self.store.manifestlog(self, self._storenarrowmatch)
1750
1750
1751 @repofilecache(b'dirstate')
1751 @repofilecache(b'dirstate')
1752 def dirstate(self):
1752 def dirstate(self):
1753 return self._makedirstate()
1753 return self._makedirstate()
1754
1754
1755 def _makedirstate(self):
1755 def _makedirstate(self):
1756 """Extension point for wrapping the dirstate per-repo."""
1756 """Extension point for wrapping the dirstate per-repo."""
1757 sparsematchfn = None
1757 sparsematchfn = None
1758 if sparse.use_sparse(self):
1758 if sparse.use_sparse(self):
1759 sparsematchfn = lambda: sparse.matcher(self)
1759 sparsematchfn = lambda: sparse.matcher(self)
1760 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1760 v2_req = requirementsmod.DIRSTATE_V2_REQUIREMENT
1761 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1761 th = requirementsmod.DIRSTATE_TRACKED_HINT_V1
1762 use_dirstate_v2 = v2_req in self.requirements
1762 use_dirstate_v2 = v2_req in self.requirements
1763 use_tracked_hint = th in self.requirements
1763 use_tracked_hint = th in self.requirements
1764
1764
1765 return dirstate.dirstate(
1765 return dirstate.dirstate(
1766 self.vfs,
1766 self.vfs,
1767 self.ui,
1767 self.ui,
1768 self.root,
1768 self.root,
1769 self._dirstatevalidate,
1769 self._dirstatevalidate,
1770 sparsematchfn,
1770 sparsematchfn,
1771 self.nodeconstants,
1771 self.nodeconstants,
1772 use_dirstate_v2,
1772 use_dirstate_v2,
1773 use_tracked_hint=use_tracked_hint,
1773 use_tracked_hint=use_tracked_hint,
1774 )
1774 )
1775
1775
1776 def _dirstatevalidate(self, node):
1776 def _dirstatevalidate(self, node):
1777 try:
1777 try:
1778 self.changelog.rev(node)
1778 self.changelog.rev(node)
1779 return node
1779 return node
1780 except error.LookupError:
1780 except error.LookupError:
1781 if not self._dirstatevalidatewarned:
1781 if not self._dirstatevalidatewarned:
1782 self._dirstatevalidatewarned = True
1782 self._dirstatevalidatewarned = True
1783 self.ui.warn(
1783 self.ui.warn(
1784 _(b"warning: ignoring unknown working parent %s!\n")
1784 _(b"warning: ignoring unknown working parent %s!\n")
1785 % short(node)
1785 % short(node)
1786 )
1786 )
1787 return self.nullid
1787 return self.nullid
1788
1788
1789 @storecache(narrowspec.FILENAME)
1789 @storecache(narrowspec.FILENAME)
1790 def narrowpats(self):
1790 def narrowpats(self):
1791 """matcher patterns for this repository's narrowspec
1791 """matcher patterns for this repository's narrowspec
1792
1792
1793 A tuple of (includes, excludes).
1793 A tuple of (includes, excludes).
1794 """
1794 """
1795 return narrowspec.load(self)
1795 return narrowspec.load(self)
1796
1796
1797 @storecache(narrowspec.FILENAME)
1797 @storecache(narrowspec.FILENAME)
1798 def _storenarrowmatch(self):
1798 def _storenarrowmatch(self):
1799 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1799 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1800 return matchmod.always()
1800 return matchmod.always()
1801 include, exclude = self.narrowpats
1801 include, exclude = self.narrowpats
1802 return narrowspec.match(self.root, include=include, exclude=exclude)
1802 return narrowspec.match(self.root, include=include, exclude=exclude)
1803
1803
1804 @storecache(narrowspec.FILENAME)
1804 @storecache(narrowspec.FILENAME)
1805 def _narrowmatch(self):
1805 def _narrowmatch(self):
1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1806 if requirementsmod.NARROW_REQUIREMENT not in self.requirements:
1807 return matchmod.always()
1807 return matchmod.always()
1808 narrowspec.checkworkingcopynarrowspec(self)
1808 narrowspec.checkworkingcopynarrowspec(self)
1809 include, exclude = self.narrowpats
1809 include, exclude = self.narrowpats
1810 return narrowspec.match(self.root, include=include, exclude=exclude)
1810 return narrowspec.match(self.root, include=include, exclude=exclude)
1811
1811
1812 def narrowmatch(self, match=None, includeexact=False):
1812 def narrowmatch(self, match=None, includeexact=False):
1813 """matcher corresponding the the repo's narrowspec
1813 """matcher corresponding the the repo's narrowspec
1814
1814
1815 If `match` is given, then that will be intersected with the narrow
1815 If `match` is given, then that will be intersected with the narrow
1816 matcher.
1816 matcher.
1817
1817
1818 If `includeexact` is True, then any exact matches from `match` will
1818 If `includeexact` is True, then any exact matches from `match` will
1819 be included even if they're outside the narrowspec.
1819 be included even if they're outside the narrowspec.
1820 """
1820 """
1821 if match:
1821 if match:
1822 if includeexact and not self._narrowmatch.always():
1822 if includeexact and not self._narrowmatch.always():
1823 # do not exclude explicitly-specified paths so that they can
1823 # do not exclude explicitly-specified paths so that they can
1824 # be warned later on
1824 # be warned later on
1825 em = matchmod.exact(match.files())
1825 em = matchmod.exact(match.files())
1826 nm = matchmod.unionmatcher([self._narrowmatch, em])
1826 nm = matchmod.unionmatcher([self._narrowmatch, em])
1827 return matchmod.intersectmatchers(match, nm)
1827 return matchmod.intersectmatchers(match, nm)
1828 return matchmod.intersectmatchers(match, self._narrowmatch)
1828 return matchmod.intersectmatchers(match, self._narrowmatch)
1829 return self._narrowmatch
1829 return self._narrowmatch
1830
1830
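# Illustrative usage sketch, not part of localrepo.py: assuming `repo` is a
# narrow clone, a command-level matcher can be combined with the narrow
# matcher as described above; the pattern below is made up.
from mercurial import match as matchmod

m = matchmod.match(repo.root, b'', [b'glob:src/**'])
narrowed = repo.narrowmatch(m, includeexact=True)
# A path is accepted only if it matches the pattern *and* is either inside
# the narrowspec or explicitly listed in the original matcher's files().
narrowed(b'src/main.py')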
1831 def setnarrowpats(self, newincludes, newexcludes):
1831 def setnarrowpats(self, newincludes, newexcludes):
1832 narrowspec.save(self, newincludes, newexcludes)
1832 narrowspec.save(self, newincludes, newexcludes)
1833 self.invalidate(clearfilecache=True)
1833 self.invalidate(clearfilecache=True)
1834
1834
1835 @unfilteredpropertycache
1835 @unfilteredpropertycache
1836 def _quick_access_changeid_null(self):
1836 def _quick_access_changeid_null(self):
1837 return {
1837 return {
1838 b'null': (nullrev, self.nodeconstants.nullid),
1838 b'null': (nullrev, self.nodeconstants.nullid),
1839 nullrev: (nullrev, self.nodeconstants.nullid),
1839 nullrev: (nullrev, self.nodeconstants.nullid),
1840 self.nullid: (nullrev, self.nullid),
1840 self.nullid: (nullrev, self.nullid),
1841 }
1841 }
1842
1842
1843 @unfilteredpropertycache
1843 @unfilteredpropertycache
1844 def _quick_access_changeid_wc(self):
1844 def _quick_access_changeid_wc(self):
1845 # also fast path access to the working copy parents
1845 # also fast path access to the working copy parents
1846 # however, only do it for filters that ensure the wc is visible.
1846 # however, only do it for filters that ensure the wc is visible.
1847 quick = self._quick_access_changeid_null.copy()
1847 quick = self._quick_access_changeid_null.copy()
1848 cl = self.unfiltered().changelog
1848 cl = self.unfiltered().changelog
1849 for node in self.dirstate.parents():
1849 for node in self.dirstate.parents():
1850 if node == self.nullid:
1850 if node == self.nullid:
1851 continue
1851 continue
1852 rev = cl.index.get_rev(node)
1852 rev = cl.index.get_rev(node)
1853 if rev is None:
1853 if rev is None:
1854 # unknown working copy parent case:
1854 # unknown working copy parent case:
1855 #
1855 #
1856 # skip the fast path and let higher code deal with it
1856 # skip the fast path and let higher code deal with it
1857 continue
1857 continue
1858 pair = (rev, node)
1858 pair = (rev, node)
1859 quick[rev] = pair
1859 quick[rev] = pair
1860 quick[node] = pair
1860 quick[node] = pair
1861 # also add the parents of the parents
1861 # also add the parents of the parents
1862 for r in cl.parentrevs(rev):
1862 for r in cl.parentrevs(rev):
1863 if r == nullrev:
1863 if r == nullrev:
1864 continue
1864 continue
1865 n = cl.node(r)
1865 n = cl.node(r)
1866 pair = (r, n)
1866 pair = (r, n)
1867 quick[r] = pair
1867 quick[r] = pair
1868 quick[n] = pair
1868 quick[n] = pair
1869 p1node = self.dirstate.p1()
1869 p1node = self.dirstate.p1()
1870 if p1node != self.nullid:
1870 if p1node != self.nullid:
1871 quick[b'.'] = quick[p1node]
1871 quick[b'.'] = quick[p1node]
1872 return quick
1872 return quick
1873
1873
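# Illustrative sketch, not part of localrepo.py: shape of the mapping built
# above. The working-copy parents (and their parents) are indexed by both
# revision number and binary node, next to the `null` entries; the numbers
# and names below are made up.
#
#     {
#         b'null': (-1, nullid),  -1: (-1, nullid),  nullid: (-1, nullid),
#         42: (42, p1node),       p1node: (42, p1node),
#         41: (41, grandparent),  grandparent: (41, grandparent),
#         b'.': (42, p1node),
#     }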
1874 @unfilteredmethod
1874 @unfilteredmethod
1875 def _quick_access_changeid_invalidate(self):
1875 def _quick_access_changeid_invalidate(self):
1876 if '_quick_access_changeid_wc' in vars(self):
1876 if '_quick_access_changeid_wc' in vars(self):
1877 del self.__dict__['_quick_access_changeid_wc']
1877 del self.__dict__['_quick_access_changeid_wc']
1878
1878
1879 @property
1879 @property
1880 def _quick_access_changeid(self):
1880 def _quick_access_changeid(self):
1881 """an helper dictionnary for __getitem__ calls
1881 """an helper dictionnary for __getitem__ calls
1882
1882
1883 This contains a list of symbol we can recognise right away without
1883 This contains a list of symbol we can recognise right away without
1884 further processing.
1884 further processing.
1885 """
1885 """
1886 if self.filtername in repoview.filter_has_wc:
1886 if self.filtername in repoview.filter_has_wc:
1887 return self._quick_access_changeid_wc
1887 return self._quick_access_changeid_wc
1888 return self._quick_access_changeid_null
1888 return self._quick_access_changeid_null
1889
1889
1890 def __getitem__(self, changeid):
1890 def __getitem__(self, changeid):
1891 # dealing with special cases
1891 # dealing with special cases
1892 if changeid is None:
1892 if changeid is None:
1893 return context.workingctx(self)
1893 return context.workingctx(self)
1894 if isinstance(changeid, context.basectx):
1894 if isinstance(changeid, context.basectx):
1895 return changeid
1895 return changeid
1896
1896
1897 # dealing with multiple revisions
1897 # dealing with multiple revisions
1898 if isinstance(changeid, slice):
1898 if isinstance(changeid, slice):
1899 # wdirrev isn't contiguous so the slice shouldn't include it
1899 # wdirrev isn't contiguous so the slice shouldn't include it
1900 return [
1900 return [
1901 self[i]
1901 self[i]
1902 for i in range(*changeid.indices(len(self)))
1902 for i in range(*changeid.indices(len(self)))
1903 if i not in self.changelog.filteredrevs
1903 if i not in self.changelog.filteredrevs
1904 ]
1904 ]
1905
1905
1906 # dealing with some special values
1906 # dealing with some special values
1907 quick_access = self._quick_access_changeid.get(changeid)
1907 quick_access = self._quick_access_changeid.get(changeid)
1908 if quick_access is not None:
1908 if quick_access is not None:
1909 rev, node = quick_access
1909 rev, node = quick_access
1910 return context.changectx(self, rev, node, maybe_filtered=False)
1910 return context.changectx(self, rev, node, maybe_filtered=False)
1911 if changeid == b'tip':
1911 if changeid == b'tip':
1912 node = self.changelog.tip()
1912 node = self.changelog.tip()
1913 rev = self.changelog.rev(node)
1913 rev = self.changelog.rev(node)
1914 return context.changectx(self, rev, node)
1914 return context.changectx(self, rev, node)
1915
1915
1916 # dealing with arbitrary values
1916 # dealing with arbitrary values
1917 try:
1917 try:
1918 if isinstance(changeid, int):
1918 if isinstance(changeid, int):
1919 node = self.changelog.node(changeid)
1919 node = self.changelog.node(changeid)
1920 rev = changeid
1920 rev = changeid
1921 elif changeid == b'.':
1921 elif changeid == b'.':
1922 # this is a hack to delay/avoid loading obsmarkers
1922 # this is a hack to delay/avoid loading obsmarkers
1923 # when we know that '.' won't be hidden
1923 # when we know that '.' won't be hidden
1924 node = self.dirstate.p1()
1924 node = self.dirstate.p1()
1925 rev = self.unfiltered().changelog.rev(node)
1925 rev = self.unfiltered().changelog.rev(node)
1926 elif len(changeid) == self.nodeconstants.nodelen:
1926 elif len(changeid) == self.nodeconstants.nodelen:
1927 try:
1927 try:
1928 node = changeid
1928 node = changeid
1929 rev = self.changelog.rev(changeid)
1929 rev = self.changelog.rev(changeid)
1930 except error.FilteredLookupError:
1930 except error.FilteredLookupError:
1931 changeid = hex(changeid) # for the error message
1931 changeid = hex(changeid) # for the error message
1932 raise
1932 raise
1933 except LookupError:
1933 except LookupError:
1934 # check if it might have come from damaged dirstate
1934 # check if it might have come from damaged dirstate
1935 #
1935 #
1936 # XXX we could avoid the unfiltered if we had a recognizable
1936 # XXX we could avoid the unfiltered if we had a recognizable
1937 # exception for filtered changeset access
1937 # exception for filtered changeset access
1938 if (
1938 if (
1939 self.local()
1939 self.local()
1940 and changeid in self.unfiltered().dirstate.parents()
1940 and changeid in self.unfiltered().dirstate.parents()
1941 ):
1941 ):
1942 msg = _(b"working directory has unknown parent '%s'!")
1942 msg = _(b"working directory has unknown parent '%s'!")
1943 raise error.Abort(msg % short(changeid))
1943 raise error.Abort(msg % short(changeid))
1944 changeid = hex(changeid) # for the error message
1944 changeid = hex(changeid) # for the error message
1945 raise
1945 raise
1946
1946
1947 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1947 elif len(changeid) == 2 * self.nodeconstants.nodelen:
1948 node = bin(changeid)
1948 node = bin(changeid)
1949 rev = self.changelog.rev(node)
1949 rev = self.changelog.rev(node)
1950 else:
1950 else:
1951 raise error.ProgrammingError(
1951 raise error.ProgrammingError(
1952 b"unsupported changeid '%s' of type %s"
1952 b"unsupported changeid '%s' of type %s"
1953 % (changeid, pycompat.bytestr(type(changeid)))
1953 % (changeid, pycompat.bytestr(type(changeid)))
1954 )
1954 )
1955
1955
1956 return context.changectx(self, rev, node)
1956 return context.changectx(self, rev, node)
1957
1957
1958 except (error.FilteredIndexError, error.FilteredLookupError):
1958 except (error.FilteredIndexError, error.FilteredLookupError):
1959 raise error.FilteredRepoLookupError(
1959 raise error.FilteredRepoLookupError(
1960 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1960 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1961 )
1961 )
1962 except (IndexError, LookupError):
1962 except (IndexError, LookupError):
1963 raise error.RepoLookupError(
1963 raise error.RepoLookupError(
1964 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1964 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1965 )
1965 )
1966 except error.WdirUnsupported:
1966 except error.WdirUnsupported:
1967 return context.workingctx(self)
1967 return context.workingctx(self)
1968
1968
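# Illustrative usage sketch, not part of localrepo.py: the lookups handled
# by __getitem__ above, assuming `repo` is a localrepository instance.
wctx = repo[None]            # working directory context
tip = repo[b'tip']           # symbolic name handled explicitly above
ctx0 = repo[0]               # integer revision number
parent = repo[b'.']          # first parent of the working directory
by_hex = repo[tip.hex()]     # 40-character hex nodeid
first_five = repo[0:5]       # slices yield a list of changectx objects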
1969 def __contains__(self, changeid):
1969 def __contains__(self, changeid):
1970 """True if the given changeid exists"""
1970 """True if the given changeid exists"""
1971 try:
1971 try:
1972 self[changeid]
1972 self[changeid]
1973 return True
1973 return True
1974 except error.RepoLookupError:
1974 except error.RepoLookupError:
1975 return False
1975 return False
1976
1976
1977 def __nonzero__(self):
1977 def __nonzero__(self):
1978 return True
1978 return True
1979
1979
1980 __bool__ = __nonzero__
1980 __bool__ = __nonzero__
1981
1981
1982 def __len__(self):
1982 def __len__(self):
1983 # no need to pay the cost of repoview.changelog
1983 # no need to pay the cost of repoview.changelog
1984 unfi = self.unfiltered()
1984 unfi = self.unfiltered()
1985 return len(unfi.changelog)
1985 return len(unfi.changelog)
1986
1986
1987 def __iter__(self):
1987 def __iter__(self):
1988 return iter(self.changelog)
1988 return iter(self.changelog)
1989
1989
1990 def revs(self, expr: bytes, *args):
1990 def revs(self, expr: bytes, *args):
1991 """Find revisions matching a revset.
1991 """Find revisions matching a revset.
1992
1992
1993 The revset is specified as a string ``expr`` that may contain
1993 The revset is specified as a string ``expr`` that may contain
1994 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1994 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1995
1995
1996 Revset aliases from the configuration are not expanded. To expand
1996 Revset aliases from the configuration are not expanded. To expand
1997 user aliases, consider calling ``scmutil.revrange()`` or
1997 user aliases, consider calling ``scmutil.revrange()`` or
1998 ``repo.anyrevs([expr], user=True)``.
1998 ``repo.anyrevs([expr], user=True)``.
1999
1999
2000 Returns a smartset.abstractsmartset, which is a list-like interface
2000 Returns a smartset.abstractsmartset, which is a list-like interface
2001 that contains integer revisions.
2001 that contains integer revisions.
2002 """
2002 """
2003 tree = revsetlang.spectree(expr, *args)
2003 tree = revsetlang.spectree(expr, *args)
2004 return revset.makematcher(tree)(self)
2004 return revset.makematcher(tree)(self)
2005
2005
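# Illustrative usage sketch, not part of localrepo.py: %-formatting is
# handled by revsetlang.formatspec; the revsets and values below are made up.
draft_heads = repo.revs(b'heads(draft())')
older = repo.revs(b'ancestors(%d) and not public()', 1234)
subset = repo.revs(b'%ld and merge()', [10, 11, 12])
for r in subset:
    print(r)  # integer revision numbers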
2006 def set(self, expr: bytes, *args):
2006 def set(self, expr: bytes, *args):
2007 """Find revisions matching a revset and emit changectx instances.
2007 """Find revisions matching a revset and emit changectx instances.
2008
2008
2009 This is a convenience wrapper around ``revs()`` that iterates the
2009 This is a convenience wrapper around ``revs()`` that iterates the
2010 result and is a generator of changectx instances.
2010 result and is a generator of changectx instances.
2011
2011
2012 Revset aliases from the configuration are not expanded. To expand
2012 Revset aliases from the configuration are not expanded. To expand
2013 user aliases, consider calling ``scmutil.revrange()``.
2013 user aliases, consider calling ``scmutil.revrange()``.
2014 """
2014 """
2015 for r in self.revs(expr, *args):
2015 for r in self.revs(expr, *args):
2016 yield self[r]
2016 yield self[r]
2017
2017
2018 def anyrevs(self, specs: bytes, user=False, localalias=None):
2018 def anyrevs(self, specs: bytes, user=False, localalias=None):
2019 """Find revisions matching one of the given revsets.
2019 """Find revisions matching one of the given revsets.
2020
2020
2021 Revset aliases from the configuration are not expanded by default. To
2021 Revset aliases from the configuration are not expanded by default. To
2022 expand user aliases, specify ``user=True``. To provide some local
2022 expand user aliases, specify ``user=True``. To provide some local
2023 definitions overriding user aliases, set ``localalias`` to
2023 definitions overriding user aliases, set ``localalias`` to
2024 ``{name: definitionstring}``.
2024 ``{name: definitionstring}``.
2025 """
2025 """
2026 if specs == [b'null']:
2026 if specs == [b'null']:
2027 return revset.baseset([nullrev])
2027 return revset.baseset([nullrev])
2028 if specs == [b'.']:
2028 if specs == [b'.']:
2029 quick_data = self._quick_access_changeid.get(b'.')
2029 quick_data = self._quick_access_changeid.get(b'.')
2030 if quick_data is not None:
2030 if quick_data is not None:
2031 return revset.baseset([quick_data[0]])
2031 return revset.baseset([quick_data[0]])
2032 if user:
2032 if user:
2033 m = revset.matchany(
2033 m = revset.matchany(
2034 self.ui,
2034 self.ui,
2035 specs,
2035 specs,
2036 lookup=revset.lookupfn(self),
2036 lookup=revset.lookupfn(self),
2037 localalias=localalias,
2037 localalias=localalias,
2038 )
2038 )
2039 else:
2039 else:
2040 m = revset.matchany(None, specs, localalias=localalias)
2040 m = revset.matchany(None, specs, localalias=localalias)
2041 return m(self)
2041 return m(self)
2042
2042
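# Illustrative usage sketch, not part of localrepo.py: expanding user revset
# aliases and overriding one locally; the alias name and definition below
# are made up.
revs = repo.anyrevs(
    [b'releases'],
    user=True,
    localalias={b'releases': b'head() and public()'},
)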
2043 def url(self) -> bytes:
2043 def url(self) -> bytes:
2044 return b'file:' + self.root
2044 return b'file:' + self.root
2045
2045
2046 def hook(self, name, throw=False, **args):
2046 def hook(self, name, throw=False, **args):
2047 """Call a hook, passing this repo instance.
2047 """Call a hook, passing this repo instance.
2048
2048
2049 This is a convenience method to aid invoking hooks. Extensions likely
2049 This is a convenience method to aid invoking hooks. Extensions likely
2050 won't call this unless they have registered a custom hook or are
2050 won't call this unless they have registered a custom hook or are
2051 replacing code that is expected to call a hook.
2051 replacing code that is expected to call a hook.
2052 """
2052 """
2053 return hook.hook(self.ui, self, name, throw, **args)
2053 return hook.hook(self.ui, self, name, throw, **args)
2054
2054
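# Illustrative usage sketch, not part of localrepo.py: firing a custom hook
# the way an extension might; the hook name and the extra argument are made
# up (extra arguments are exposed as HG_* environment variables to external
# hooks).
repo.hook(b'myext-sync-done', throw=False, pulled=1)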
2055 @filteredpropertycache
2055 @filteredpropertycache
2056 def _tagscache(self):
2056 def _tagscache(self):
2057 """Returns a tagscache object that contains various tags related
2057 """Returns a tagscache object that contains various tags related
2058 caches."""
2058 caches."""
2059
2059
2060 # This simplifies its cache management by having one decorated
2060 # This simplifies its cache management by having one decorated
2061 # function (this one) and the rest simply fetch things from it.
2061 # function (this one) and the rest simply fetch things from it.
2062 class tagscache:
2062 class tagscache:
2063 def __init__(self):
2063 def __init__(self):
2064 # These two define the set of tags for this repository. tags
2064 # These two define the set of tags for this repository. tags
2065 # maps tag name to node; tagtypes maps tag name to 'global' or
2065 # maps tag name to node; tagtypes maps tag name to 'global' or
2066 # 'local'. (Global tags are defined by .hgtags across all
2066 # 'local'. (Global tags are defined by .hgtags across all
2067 # heads, and local tags are defined in .hg/localtags.)
2067 # heads, and local tags are defined in .hg/localtags.)
2068 # They constitute the in-memory cache of tags.
2068 # They constitute the in-memory cache of tags.
2069 self.tags = self.tagtypes = None
2069 self.tags = self.tagtypes = None
2070
2070
2071 self.nodetagscache = self.tagslist = None
2071 self.nodetagscache = self.tagslist = None
2072
2072
2073 cache = tagscache()
2073 cache = tagscache()
2074 cache.tags, cache.tagtypes = self._findtags()
2074 cache.tags, cache.tagtypes = self._findtags()
2075
2075
2076 return cache
2076 return cache
2077
2077
2078 def tags(self):
2078 def tags(self):
2079 '''return a mapping of tag to node'''
2079 '''return a mapping of tag to node'''
2080 t = {}
2080 t = {}
2081 if self.changelog.filteredrevs:
2081 if self.changelog.filteredrevs:
2082 tags, tt = self._findtags()
2082 tags, tt = self._findtags()
2083 else:
2083 else:
2084 tags = self._tagscache.tags
2084 tags = self._tagscache.tags
2085 rev = self.changelog.rev
2085 rev = self.changelog.rev
2086 for k, v in tags.items():
2086 for k, v in tags.items():
2087 try:
2087 try:
2088 # ignore tags to unknown nodes
2088 # ignore tags to unknown nodes
2089 rev(v)
2089 rev(v)
2090 t[k] = v
2090 t[k] = v
2091 except (error.LookupError, ValueError):
2091 except (error.LookupError, ValueError):
2092 pass
2092 pass
2093 return t
2093 return t
2094
2094
2095 def _findtags(self):
2095 def _findtags(self):
2096 """Do the hard work of finding tags. Return a pair of dicts
2096 """Do the hard work of finding tags. Return a pair of dicts
2097 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2097 (tags, tagtypes) where tags maps tag name to node, and tagtypes
2098 maps tag name to a string like \'global\' or \'local\'.
2098 maps tag name to a string like \'global\' or \'local\'.
2099 Subclasses or extensions are free to add their own tags, but
2099 Subclasses or extensions are free to add their own tags, but
2100 should be aware that the returned dicts will be retained for the
2100 should be aware that the returned dicts will be retained for the
2101 duration of the localrepo object."""
2101 duration of the localrepo object."""
2102
2102
2103 # XXX what tagtype should subclasses/extensions use? Currently
2103 # XXX what tagtype should subclasses/extensions use? Currently
2104 # mq and bookmarks add tags, but do not set the tagtype at all.
2104 # mq and bookmarks add tags, but do not set the tagtype at all.
2105 # Should each extension invent its own tag type? Should there
2105 # Should each extension invent its own tag type? Should there
2106 # be one tagtype for all such "virtual" tags? Or is the status
2106 # be one tagtype for all such "virtual" tags? Or is the status
2107 # quo fine?
2107 # quo fine?
2108
2108
2109 # map tag name to (node, hist)
2109 # map tag name to (node, hist)
2110 alltags = tagsmod.findglobaltags(self.ui, self)
2110 alltags = tagsmod.findglobaltags(self.ui, self)
2111 # map tag name to tag type
2111 # map tag name to tag type
2112 tagtypes = {tag: b'global' for tag in alltags}
2112 tagtypes = {tag: b'global' for tag in alltags}
2113
2113
2114 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2114 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
2115
2115
2116 # Build the return dicts. Have to re-encode tag names because
2116 # Build the return dicts. Have to re-encode tag names because
2117 # the tags module always uses UTF-8 (in order not to lose info
2117 # the tags module always uses UTF-8 (in order not to lose info
2118 # writing to the cache), but the rest of Mercurial wants them in
2118 # writing to the cache), but the rest of Mercurial wants them in
2119 # local encoding.
2119 # local encoding.
2120 tags = {}
2120 tags = {}
2121 for name, (node, hist) in alltags.items():
2121 for name, (node, hist) in alltags.items():
2122 if node != self.nullid:
2122 if node != self.nullid:
2123 tags[encoding.tolocal(name)] = node
2123 tags[encoding.tolocal(name)] = node
2124 tags[b'tip'] = self.changelog.tip()
2124 tags[b'tip'] = self.changelog.tip()
2125 tagtypes = {
2125 tagtypes = {
2126 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2126 encoding.tolocal(name): value for (name, value) in tagtypes.items()
2127 }
2127 }
2128 return (tags, tagtypes)
2128 return (tags, tagtypes)
2129
2129
2130 def tagtype(self, tagname):
2130 def tagtype(self, tagname):
2131 """
2131 """
2132 return the type of the given tag. result can be:
2132 return the type of the given tag. result can be:
2133
2133
2134 'local' : a local tag
2134 'local' : a local tag
2135 'global' : a global tag
2135 'global' : a global tag
2136 None : tag does not exist
2136 None : tag does not exist
2137 """
2137 """
2138
2138
2139 return self._tagscache.tagtypes.get(tagname)
2139 return self._tagscache.tagtypes.get(tagname)
2140
2140
2141 def tagslist(self):
2141 def tagslist(self):
2142 '''return a list of tags ordered by revision'''
2142 '''return a list of tags ordered by revision'''
2143 if not self._tagscache.tagslist:
2143 if not self._tagscache.tagslist:
2144 l = []
2144 l = []
2145 for t, n in self.tags().items():
2145 for t, n in self.tags().items():
2146 l.append((self.changelog.rev(n), t, n))
2146 l.append((self.changelog.rev(n), t, n))
2147 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2147 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
2148
2148
2149 return self._tagscache.tagslist
2149 return self._tagscache.tagslist
2150
2150
2151 def nodetags(self, node):
2151 def nodetags(self, node):
2152 '''return the tags associated with a node'''
2152 '''return the tags associated with a node'''
2153 if not self._tagscache.nodetagscache:
2153 if not self._tagscache.nodetagscache:
2154 nodetagscache = {}
2154 nodetagscache = {}
2155 for t, n in self._tagscache.tags.items():
2155 for t, n in self._tagscache.tags.items():
2156 nodetagscache.setdefault(n, []).append(t)
2156 nodetagscache.setdefault(n, []).append(t)
2157 for tags in nodetagscache.values():
2157 for tags in nodetagscache.values():
2158 tags.sort()
2158 tags.sort()
2159 self._tagscache.nodetagscache = nodetagscache
2159 self._tagscache.nodetagscache = nodetagscache
2160 return self._tagscache.nodetagscache.get(node, [])
2160 return self._tagscache.nodetagscache.get(node, [])
2161
2161
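# Illustrative usage sketch, not part of localrepo.py: querying the tag
# caches defined above; the tag name is made up.
all_tags = repo.tags()              # {tag_name: node}
node = all_tags.get(b'v1.0')
kind = repo.tagtype(b'v1.0')        # b'global', b'local', or None
names = repo.nodetags(node) if node is not None else []
ordered = repo.tagslist()           # [(tag, node), ...] ordered by revision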
2162 def nodebookmarks(self, node):
2162 def nodebookmarks(self, node):
2163 """return the list of bookmarks pointing to the specified node"""
2163 """return the list of bookmarks pointing to the specified node"""
2164 return self._bookmarks.names(node)
2164 return self._bookmarks.names(node)
2165
2165
2166 def branchmap(self):
2166 def branchmap(self):
2167 """returns a dictionary {branch: [branchheads]} with branchheads
2167 """returns a dictionary {branch: [branchheads]} with branchheads
2168 ordered by increasing revision number"""
2168 ordered by increasing revision number"""
2169 return self._branchcaches[self]
2169 return self._branchcaches[self]
2170
2170
2171 @unfilteredmethod
2171 @unfilteredmethod
2172 def revbranchcache(self):
2172 def revbranchcache(self):
2173 if not self._revbranchcache:
2173 if not self._revbranchcache:
2174 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2174 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
2175 return self._revbranchcache
2175 return self._revbranchcache
2176
2176
2177 def register_changeset(self, rev, changelogrevision):
2177 def register_changeset(self, rev, changelogrevision):
2178 self.revbranchcache().setdata(rev, changelogrevision)
2178 self.revbranchcache().setdata(rev, changelogrevision)
2179
2179
2180 def branchtip(self, branch, ignoremissing=False):
2180 def branchtip(self, branch, ignoremissing=False):
2181 """return the tip node for a given branch
2181 """return the tip node for a given branch
2182
2182
2183 If ignoremissing is True, then this method will not raise an error.
2183 If ignoremissing is True, then this method will not raise an error.
2184 This is helpful for callers that only expect None for a missing branch
2184 This is helpful for callers that only expect None for a missing branch
2185 (e.g. namespace).
2185 (e.g. namespace).
2186
2186
2187 """
2187 """
2188 try:
2188 try:
2189 return self.branchmap().branchtip(branch)
2189 return self.branchmap().branchtip(branch)
2190 except KeyError:
2190 except KeyError:
2191 if not ignoremissing:
2191 if not ignoremissing:
2192 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2192 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
2193 else:
2193 else:
2194 pass
2194 pass
2195
2195
2196 def lookup(self, key):
2196 def lookup(self, key):
2197 node = scmutil.revsymbol(self, key).node()
2197 node = scmutil.revsymbol(self, key).node()
2198 if node is None:
2198 if node is None:
2199 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2199 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
2200 return node
2200 return node
2201
2201
2202 def lookupbranch(self, key):
2202 def lookupbranch(self, key):
2203 if self.branchmap().hasbranch(key):
2203 if self.branchmap().hasbranch(key):
2204 return key
2204 return key
2205
2205
2206 return scmutil.revsymbol(self, key).branch()
2206 return scmutil.revsymbol(self, key).branch()
2207
2207
2208 def known(self, nodes):
2208 def known(self, nodes):
2209 cl = self.changelog
2209 cl = self.changelog
2210 get_rev = cl.index.get_rev
2210 get_rev = cl.index.get_rev
2211 filtered = cl.filteredrevs
2211 filtered = cl.filteredrevs
2212 result = []
2212 result = []
2213 for n in nodes:
2213 for n in nodes:
2214 r = get_rev(n)
2214 r = get_rev(n)
2215 resp = not (r is None or r in filtered)
2215 resp = not (r is None or r in filtered)
2216 result.append(resp)
2216 result.append(resp)
2217 return result
2217 return result
2218
2218
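# Illustrative usage sketch, not part of localrepo.py: `known` maps each
# node to a boolean and treats filtered revisions as unknown; the second
# node below is made up (20-byte form, assuming a sha1 repository).
tip_node = repo[b'tip'].node()
bogus = b'\x01' * 20
repo.known([tip_node, bogus])   # -> [True, False] in a typical repository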
2219 def local(self):
2219 def local(self):
2220 return self
2220 return self
2221
2221
2222 def publishing(self):
2222 def publishing(self):
2223 # it's safe (and desirable) to trust the publish flag unconditionally
2223 # it's safe (and desirable) to trust the publish flag unconditionally
2224 # so that we don't finalize changes shared between users via ssh or nfs
2224 # so that we don't finalize changes shared between users via ssh or nfs
2225 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2225 return self.ui.configbool(b'phases', b'publish', untrusted=True)
2226
2226
2227 def cancopy(self):
2227 def cancopy(self):
2228 # so statichttprepo's override of local() works
2228 # so statichttprepo's override of local() works
2229 if not self.local():
2229 if not self.local():
2230 return False
2230 return False
2231 if not self.publishing():
2231 if not self.publishing():
2232 return True
2232 return True
2233 # if publishing we can't copy if there is filtered content
2233 # if publishing we can't copy if there is filtered content
2234 return not self.filtered(b'visible').changelog.filteredrevs
2234 return not self.filtered(b'visible').changelog.filteredrevs
2235
2235
2236 def shared(self):
2236 def shared(self):
2237 '''the type of shared repository (None if not shared)'''
2237 '''the type of shared repository (None if not shared)'''
2238 if self.sharedpath != self.path:
2238 if self.sharedpath != self.path:
2239 return b'store'
2239 return b'store'
2240 return None
2240 return None
2241
2241
2242 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2242 def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
2243 return self.vfs.reljoin(self.root, f, *insidef)
2243 return self.vfs.reljoin(self.root, f, *insidef)
2244
2244
2245 def setparents(self, p1, p2=None):
2245 def setparents(self, p1, p2=None):
2246 if p2 is None:
2246 if p2 is None:
2247 p2 = self.nullid
2247 p2 = self.nullid
2248 self[None].setparents(p1, p2)
2248 self[None].setparents(p1, p2)
2249 self._quick_access_changeid_invalidate()
2249 self._quick_access_changeid_invalidate()
2250
2250
2251 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2251 def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
2252 """changeid must be a changeset revision, if specified.
2252 """changeid must be a changeset revision, if specified.
2253 fileid can be a file revision or node."""
2253 fileid can be a file revision or node."""
2254 return context.filectx(
2254 return context.filectx(
2255 self, path, changeid, fileid, changectx=changectx
2255 self, path, changeid, fileid, changectx=changectx
2256 )
2256 )
2257
2257
2258 def getcwd(self) -> bytes:
2258 def getcwd(self) -> bytes:
2259 return self.dirstate.getcwd()
2259 return self.dirstate.getcwd()
2260
2260
2261 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2261 def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
2262 return self.dirstate.pathto(f, cwd)
2262 return self.dirstate.pathto(f, cwd)
2263
2263
2264 def _loadfilter(self, filter):
2264 def _loadfilter(self, filter):
2265 if filter not in self._filterpats:
2265 if filter not in self._filterpats:
2266 l = []
2266 l = []
2267 for pat, cmd in self.ui.configitems(filter):
2267 for pat, cmd in self.ui.configitems(filter):
2268 if cmd == b'!':
2268 if cmd == b'!':
2269 continue
2269 continue
2270 mf = matchmod.match(self.root, b'', [pat])
2270 mf = matchmod.match(self.root, b'', [pat])
2271 fn = None
2271 fn = None
2272 params = cmd
2272 params = cmd
2273 for name, filterfn in self._datafilters.items():
2273 for name, filterfn in self._datafilters.items():
2274 if cmd.startswith(name):
2274 if cmd.startswith(name):
2275 fn = filterfn
2275 fn = filterfn
2276 params = cmd[len(name) :].lstrip()
2276 params = cmd[len(name) :].lstrip()
2277 break
2277 break
2278 if not fn:
2278 if not fn:
2279 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2279 fn = lambda s, c, **kwargs: procutil.filter(s, c)
2280 fn.__name__ = 'commandfilter'
2280 fn.__name__ = 'commandfilter'
2281 # Wrap old filters not supporting keyword arguments
2281 # Wrap old filters not supporting keyword arguments
2282 if not pycompat.getargspec(fn)[2]:
2282 if not pycompat.getargspec(fn)[2]:
2283 oldfn = fn
2283 oldfn = fn
2284 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2284 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
2285 fn.__name__ = 'compat-' + oldfn.__name__
2285 fn.__name__ = 'compat-' + oldfn.__name__
2286 l.append((mf, fn, params))
2286 l.append((mf, fn, params))
2287 self._filterpats[filter] = l
2287 self._filterpats[filter] = l
2288 return self._filterpats[filter]
2288 return self._filterpats[filter]
2289
2289
2290 def _filter(self, filterpats, filename, data):
2290 def _filter(self, filterpats, filename, data):
2291 for mf, fn, cmd in filterpats:
2291 for mf, fn, cmd in filterpats:
2292 if mf(filename):
2292 if mf(filename):
2293 self.ui.debug(
2293 self.ui.debug(
2294 b"filtering %s through %s\n"
2294 b"filtering %s through %s\n"
2295 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2295 % (filename, cmd or pycompat.sysbytes(fn.__name__))
2296 )
2296 )
2297 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2297 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
2298 break
2298 break
2299
2299
2300 return data
2300 return data
2301
2301
2302 @unfilteredpropertycache
2302 @unfilteredpropertycache
2303 def _encodefilterpats(self):
2303 def _encodefilterpats(self):
2304 return self._loadfilter(b'encode')
2304 return self._loadfilter(b'encode')
2305
2305
2306 @unfilteredpropertycache
2306 @unfilteredpropertycache
2307 def _decodefilterpats(self):
2307 def _decodefilterpats(self):
2308 return self._loadfilter(b'decode')
2308 return self._loadfilter(b'decode')
2309
2309
2310 def adddatafilter(self, name, filter):
2310 def adddatafilter(self, name, filter):
2311 self._datafilters[name] = filter
2311 self._datafilters[name] = filter
2312
2312
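# Illustrative sketch, not part of localrepo.py: registering a data filter
# from an extension. _loadfilter() above matches a configured command
# against registered names by prefix, so a hypothetical configuration such
# as "[encode] **.txt = upperfilter:" would dispatch to the function below,
# which receives (data, params) plus the keyword arguments passed by
# _filter(). All names here are made up.
def upperfilter(data, params, **kwargs):
    # `params` is whatever follows the registered name in the config value
    return data.upper()


def reposetup(ui, repo):
    # standard extension entry point
    repo.adddatafilter(b'upperfilter:', upperfilter)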
2313 def wread(self, filename: bytes) -> bytes:
2313 def wread(self, filename: bytes) -> bytes:
2314 if self.wvfs.islink(filename):
2314 if self.wvfs.islink(filename):
2315 data = self.wvfs.readlink(filename)
2315 data = self.wvfs.readlink(filename)
2316 else:
2316 else:
2317 data = self.wvfs.read(filename)
2317 data = self.wvfs.read(filename)
2318 return self._filter(self._encodefilterpats, filename, data)
2318 return self._filter(self._encodefilterpats, filename, data)
2319
2319
2320 def wwrite(
2320 def wwrite(
2321 self,
2321 self,
2322 filename: bytes,
2322 filename: bytes,
2323 data: bytes,
2323 data: bytes,
2324 flags: bytes,
2324 flags: bytes,
2325 backgroundclose=False,
2325 backgroundclose=False,
2326 **kwargs
2326 **kwargs
2327 ) -> int:
2327 ) -> int:
2328 """write ``data`` into ``filename`` in the working directory
2328 """write ``data`` into ``filename`` in the working directory
2329
2329
2330 This returns the length of the written (maybe decoded) data.
2330 This returns the length of the written (maybe decoded) data.
2331 """
2331 """
2332 data = self._filter(self._decodefilterpats, filename, data)
2332 data = self._filter(self._decodefilterpats, filename, data)
2333 if b'l' in flags:
2333 if b'l' in flags:
2334 self.wvfs.symlink(data, filename)
2334 self.wvfs.symlink(data, filename)
2335 else:
2335 else:
2336 self.wvfs.write(
2336 self.wvfs.write(
2337 filename, data, backgroundclose=backgroundclose, **kwargs
2337 filename, data, backgroundclose=backgroundclose, **kwargs
2338 )
2338 )
2339 if b'x' in flags:
2339 if b'x' in flags:
2340 self.wvfs.setflags(filename, False, True)
2340 self.wvfs.setflags(filename, False, True)
2341 else:
2341 else:
2342 self.wvfs.setflags(filename, False, False)
2342 self.wvfs.setflags(filename, False, False)
2343 return len(data)
2343 return len(data)
2344
2344
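# Illustrative usage sketch, not part of localrepo.py: writing
# working-directory files through the decode filters; the paths and contents
# below are made up.
repo.wwrite(b'docs/readme.txt', b'hello\n', b'')           # regular file
repo.wwrite(b'bin/run.sh', b'#!/bin/sh\n', b'x')           # executable bit
repo.wwrite(b'readme-link', b'docs/readme.txt', b'l')      # symlink target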
2345 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2345 def wwritedata(self, filename: bytes, data: bytes) -> bytes:
2346 return self._filter(self._decodefilterpats, filename, data)
2346 return self._filter(self._decodefilterpats, filename, data)
2347
2347
2348 def currenttransaction(self):
2348 def currenttransaction(self):
2349 """return the current transaction or None if non exists"""
2349 """return the current transaction or None if non exists"""
2350 if self._transref:
2350 if self._transref:
2351 tr = self._transref()
2351 tr = self._transref()
2352 else:
2352 else:
2353 tr = None
2353 tr = None
2354
2354
2355 if tr and tr.running():
2355 if tr and tr.running():
2356 return tr
2356 return tr
2357 return None
2357 return None
2358
2358
2359 def transaction(self, desc, report=None):
2359 def transaction(self, desc, report=None):
2360 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2360 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
2361 b'devel', b'check-locks'
2361 b'devel', b'check-locks'
2362 ):
2362 ):
2363 if self._currentlock(self._lockref) is None:
2363 if self._currentlock(self._lockref) is None:
2364 raise error.ProgrammingError(b'transaction requires locking')
2364 raise error.ProgrammingError(b'transaction requires locking')
2365 tr = self.currenttransaction()
2365 tr = self.currenttransaction()
2366 if tr is not None:
2366 if tr is not None:
2367 return tr.nest(name=desc)
2367 return tr.nest(name=desc)
2368
2368
2369 # abort here if the journal already exists
2369 # abort here if the journal already exists
2370 if self.svfs.exists(b"journal"):
2370 if self.svfs.exists(b"journal"):
2371 raise error.RepoError(
2371 raise error.RepoError(
2372 _(b"abandoned transaction found"),
2372 _(b"abandoned transaction found"),
2373 hint=_(b"run 'hg recover' to clean up transaction"),
2373 hint=_(b"run 'hg recover' to clean up transaction"),
2374 )
2374 )
2375
2375
2376 idbase = b"%.40f#%f" % (random.random(), time.time())
2376 idbase = b"%.40f#%f" % (random.random(), time.time())
2377 ha = hex(hashutil.sha1(idbase).digest())
2377 ha = hex(hashutil.sha1(idbase).digest())
2378 txnid = b'TXN:' + ha
2378 txnid = b'TXN:' + ha
2379 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2379 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2380
2380
2381 self._writejournal(desc)
2381 self._writejournal(desc)
2382 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2382 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2383 if report:
2383 if report:
2384 rp = report
2384 rp = report
2385 else:
2385 else:
2386 rp = self.ui.warn
2386 rp = self.ui.warn
2387 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2387 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2388 # we must avoid cyclic reference between repo and transaction.
2388 # we must avoid cyclic reference between repo and transaction.
2389 reporef = weakref.ref(self)
2389 reporef = weakref.ref(self)
2390 # Code to track tag movement
2390 # Code to track tag movement
2391 #
2391 #
2392 # Since tags are all handled as file content, it is actually quite hard
2392 # Since tags are all handled as file content, it is actually quite hard
2393 # to track these movement from a code perspective. So we fallback to a
2393 # to track these movement from a code perspective. So we fallback to a
2394 # tracking at the repository level. One could envision to track changes
2394 # tracking at the repository level. One could envision to track changes
2395 # to the '.hgtags' file through changegroup apply but that fails to
2395 # to the '.hgtags' file through changegroup apply but that fails to
2396 # cope with cases where a transaction exposes new heads without a changegroup
2396 # cope with cases where a transaction exposes new heads without a changegroup
2397 # being involved (e.g. phase movement).
2397 # being involved (e.g. phase movement).
2398 #
2398 #
2399 # For now, we gate the feature behind a flag since this likely comes
2399 # For now, we gate the feature behind a flag since this likely comes
2400 # with performance impacts. The current code runs more often than needed
2400 # with performance impacts. The current code runs more often than needed
2401 # and does not use caches as much as it could. The current focus is on
2401 # and does not use caches as much as it could. The current focus is on
2402 # the behavior of the feature, so we disable it by default. The flag
2402 # the behavior of the feature, so we disable it by default. The flag
2403 # will be removed when we are happy with the performance impact.
2403 # will be removed when we are happy with the performance impact.
2404 #
2404 #
2405 # Once this feature is no longer experimental move the following
2405 # Once this feature is no longer experimental move the following
2406 # documentation to the appropriate help section:
2406 # documentation to the appropriate help section:
2407 #
2407 #
2408 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2408 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2409 # tags (new or changed or deleted tags). In addition the details of
2409 # tags (new or changed or deleted tags). In addition the details of
2410 # these changes are made available in a file at:
2410 # these changes are made available in a file at:
2411 # ``REPOROOT/.hg/changes/tags.changes``.
2411 # ``REPOROOT/.hg/changes/tags.changes``.
2412 # Make sure you check for HG_TAG_MOVED before reading that file as it
2412 # Make sure you check for HG_TAG_MOVED before reading that file as it
2413 # might exist from a previous transaction even if no tags were touched
2413 # might exist from a previous transaction even if no tags were touched
2414 # in this one. Changes are recorded in a line-based format::
2414 # in this one. Changes are recorded in a line-based format::
2415 #
2415 #
2416 # <action> <hex-node> <tag-name>\n
2416 # <action> <hex-node> <tag-name>\n
2417 #
2417 #
2418 # Actions are defined as follows:
2418 # Actions are defined as follows:
2419 # "-R": tag is removed,
2419 # "-R": tag is removed,
2420 # "+A": tag is added,
2420 # "+A": tag is added,
2421 # "-M": tag is moved (old value),
2421 # "-M": tag is moved (old value),
2422 # "+M": tag is moved (new value),
2422 # "+M": tag is moved (new value),
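# Illustrative sketch, not part of localrepo.py: a helper that a txnclose
# hook could use to parse the line-based format described above (after
# checking HG_TAG_MOVED); the function name is made up.
def parse_tag_changes(data):
    entries = []
    for line in data.splitlines():
        action, hexnode, tagname = line.split(b' ', 2)
        entries.append((action, hexnode, tagname))
    return entries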
2423 tracktags = lambda x: None
2423 tracktags = lambda x: None
2424 # experimental config: experimental.hook-track-tags
2424 # experimental config: experimental.hook-track-tags
2425 shouldtracktags = self.ui.configbool(
2425 shouldtracktags = self.ui.configbool(
2426 b'experimental', b'hook-track-tags'
2426 b'experimental', b'hook-track-tags'
2427 )
2427 )
2428 if desc != b'strip' and shouldtracktags:
2428 if desc != b'strip' and shouldtracktags:
2429 oldheads = self.changelog.headrevs()
2429 oldheads = self.changelog.headrevs()
2430
2430
2431 def tracktags(tr2):
2431 def tracktags(tr2):
2432 repo = reporef()
2432 repo = reporef()
2433 assert repo is not None # help pytype
2433 assert repo is not None # help pytype
2434 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2434 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2435 newheads = repo.changelog.headrevs()
2435 newheads = repo.changelog.headrevs()
2436 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2436 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2437 # note: we compare lists here.
2437 # note: we compare lists here.
2438 # As we do it only once, building a set would not be cheaper
2438 # As we do it only once, building a set would not be cheaper
2439 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2439 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2440 if changes:
2440 if changes:
2441 tr2.hookargs[b'tag_moved'] = b'1'
2441 tr2.hookargs[b'tag_moved'] = b'1'
2442 with repo.vfs(
2442 with repo.vfs(
2443 b'changes/tags.changes', b'w', atomictemp=True
2443 b'changes/tags.changes', b'w', atomictemp=True
2444 ) as changesfile:
2444 ) as changesfile:
2445 # note: we do not register the file to the transaction
2445 # note: we do not register the file to the transaction
2446 # because we needs it to still exist on the transaction
2446 # because we needs it to still exist on the transaction
2447 # is close (for txnclose hooks)
2447 # is close (for txnclose hooks)
2448 tagsmod.writediff(changesfile, changes)
2448 tagsmod.writediff(changesfile, changes)
2449
2449
2450 def validate(tr2):
2450 def validate(tr2):
2451 """will run pre-closing hooks"""
2451 """will run pre-closing hooks"""
2452 # XXX the transaction API is a bit lacking here so we take a hacky
2452 # XXX the transaction API is a bit lacking here so we take a hacky
2453 # path for now
2453 # path for now
2454 #
2454 #
2455 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2455 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2456 # dict is copied before these run. In addition we need the data
2456 # dict is copied before these run. In addition we need the data
2457 # available to in-memory hooks too.
2457 # available to in-memory hooks too.
2458 #
2458 #
2459 # Moreover, we also need to make sure this runs before txnclose
2459 # Moreover, we also need to make sure this runs before txnclose
2460 # hooks and there is no "pending" mechanism that would execute
2460 # hooks and there is no "pending" mechanism that would execute
2461 # logic only if hooks are about to run.
2461 # logic only if hooks are about to run.
2462 #
2462 #
2463 # Fixing this limitation of the transaction is also needed to track
2463 # Fixing this limitation of the transaction is also needed to track
2464 # other families of changes (bookmarks, phases, obsolescence).
2464 # other families of changes (bookmarks, phases, obsolescence).
2465 #
2465 #
2466 # This will have to be fixed before we remove the experimental
2466 # This will have to be fixed before we remove the experimental
2467 # gating.
2467 # gating.
2468 tracktags(tr2)
2468 tracktags(tr2)
2469 repo = reporef()
2469 repo = reporef()
2470 assert repo is not None # help pytype
2470 assert repo is not None # help pytype
2471
2471
2472 singleheadopt = (b'experimental', b'single-head-per-branch')
2472 singleheadopt = (b'experimental', b'single-head-per-branch')
2473 singlehead = repo.ui.configbool(*singleheadopt)
2473 singlehead = repo.ui.configbool(*singleheadopt)
2474 if singlehead:
2474 if singlehead:
2475 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2475 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2476 accountclosed = singleheadsub.get(
2476 accountclosed = singleheadsub.get(
2477 b"account-closed-heads", False
2477 b"account-closed-heads", False
2478 )
2478 )
2479 if singleheadsub.get(b"public-changes-only", False):
2479 if singleheadsub.get(b"public-changes-only", False):
2480 filtername = b"immutable"
2480 filtername = b"immutable"
2481 else:
2481 else:
2482 filtername = b"visible"
2482 filtername = b"visible"
2483 scmutil.enforcesinglehead(
2483 scmutil.enforcesinglehead(
2484 repo, tr2, desc, accountclosed, filtername
2484 repo, tr2, desc, accountclosed, filtername
2485 )
2485 )
2486 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2486 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2487 for name, (old, new) in sorted(
2487 for name, (old, new) in sorted(
2488 tr.changes[b'bookmarks'].items()
2488 tr.changes[b'bookmarks'].items()
2489 ):
2489 ):
2490 args = tr.hookargs.copy()
2490 args = tr.hookargs.copy()
2491 args.update(bookmarks.preparehookargs(name, old, new))
2491 args.update(bookmarks.preparehookargs(name, old, new))
2492 repo.hook(
2492 repo.hook(
2493 b'pretxnclose-bookmark',
2493 b'pretxnclose-bookmark',
2494 throw=True,
2494 throw=True,
2495 **pycompat.strkwargs(args)
2495 **pycompat.strkwargs(args)
2496 )
2496 )
2497 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2497 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2498 cl = repo.unfiltered().changelog
2498 cl = repo.unfiltered().changelog
2499 for revs, (old, new) in tr.changes[b'phases']:
2499 for revs, (old, new) in tr.changes[b'phases']:
2500 for rev in revs:
2500 for rev in revs:
2501 args = tr.hookargs.copy()
2501 args = tr.hookargs.copy()
2502 node = hex(cl.node(rev))
2502 node = hex(cl.node(rev))
2503 args.update(phases.preparehookargs(node, old, new))
2503 args.update(phases.preparehookargs(node, old, new))
2504 repo.hook(
2504 repo.hook(
2505 b'pretxnclose-phase',
2505 b'pretxnclose-phase',
2506 throw=True,
2506 throw=True,
2507 **pycompat.strkwargs(args)
2507 **pycompat.strkwargs(args)
2508 )
2508 )
2509
2509
2510 repo.hook(
2510 repo.hook(
2511 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2511 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2512 )
2512 )
2513
2513
2514 def releasefn(tr, success):
2514 def releasefn(tr, success):
2515 repo = reporef()
2515 repo = reporef()
2516 if repo is None:
2516 if repo is None:
2517 # If the repo has been GC'd (and this release function is being
2517 # If the repo has been GC'd (and this release function is being
2518 # called from transaction.__del__), there's not much we can do,
2518 # called from transaction.__del__), there's not much we can do,
2519 # so just leave the unfinished transaction there and let the
2519 # so just leave the unfinished transaction there and let the
2520 # user run `hg recover`.
2520 # user run `hg recover`.
2521 return
2521 return
2522 if success:
2522 if success:
2523 # this should be explicitly invoked here, because
2523 # this should be explicitly invoked here, because
2524 # in-memory changes aren't written out at closing
2524 # in-memory changes aren't written out at closing
2525 # transaction, if tr.addfilegenerator (via
2525 # transaction, if tr.addfilegenerator (via
2526 # dirstate.write or so) isn't invoked while
2526 # dirstate.write or so) isn't invoked while
2527 # transaction running
2527 # transaction running
2528 repo.dirstate.write(None)
2528 repo.dirstate.write(None)
2529 else:
2529 else:
2530 # discard all changes (including ones already written
2530 # discard all changes (including ones already written
2531 # out) in this transaction
2531 # out) in this transaction
2532 narrowspec.restorebackup(self, b'journal.narrowspec')
2532 narrowspec.restorebackup(self, b'journal.narrowspec')
2533 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2533 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2534 if repo.currentwlock() is not None:
2534 if repo.currentwlock() is not None:
2535 repo.dirstate.restorebackup(None, b'journal.dirstate')
2535 repo.dirstate.restorebackup(None, b'journal.dirstate')
2536
2536
2537 repo.invalidate(clearfilecache=True)
2537 repo.invalidate(clearfilecache=True)
2538
2538
2539 tr = transaction.transaction(
2539 tr = transaction.transaction(
2540 rp,
2540 rp,
2541 self.svfs,
2541 self.svfs,
2542 vfsmap,
2542 vfsmap,
2543 b"journal",
2543 b"journal",
2544 b"undo",
2544 b"undo",
2545 aftertrans(renames),
2545 aftertrans(renames),
2546 self.store.createmode,
2546 self.store.createmode,
2547 validator=validate,
2547 validator=validate,
2548 releasefn=releasefn,
2548 releasefn=releasefn,
2549 checkambigfiles=_cachedfiles,
2549 checkambigfiles=_cachedfiles,
2550 name=desc,
2550 name=desc,
2551 )
2551 )
2552 tr.changes[b'origrepolen'] = len(self)
2552 tr.changes[b'origrepolen'] = len(self)
2553 tr.changes[b'obsmarkers'] = set()
2553 tr.changes[b'obsmarkers'] = set()
2554 tr.changes[b'phases'] = []
2554 tr.changes[b'phases'] = []
2555 tr.changes[b'bookmarks'] = {}
2555 tr.changes[b'bookmarks'] = {}
2556
2556
2557 tr.hookargs[b'txnid'] = txnid
2557 tr.hookargs[b'txnid'] = txnid
2558 tr.hookargs[b'txnname'] = desc
2558 tr.hookargs[b'txnname'] = desc
2559 tr.hookargs[b'changes'] = tr.changes
2559 tr.hookargs[b'changes'] = tr.changes
2560 # note: writing the fncache only during finalize means that the file is
2560 # note: writing the fncache only during finalize means that the file is
2561 # outdated when running hooks. As fncache is used for streaming clone,
2561 # outdated when running hooks. As fncache is used for streaming clone,
2562 # this is not expected to break anything that happens during the hooks.
2562 # this is not expected to break anything that happens during the hooks.
2563 tr.addfinalize(b'flush-fncache', self.store.write)
2563 tr.addfinalize(b'flush-fncache', self.store.write)
2564
2564
2565 def txnclosehook(tr2):
2565 def txnclosehook(tr2):
2566 """To be run if transaction is successful, will schedule a hook run"""
2566 """To be run if transaction is successful, will schedule a hook run"""
2567 # Don't reference tr2 in hook() so we don't hold a reference.
2567 # Don't reference tr2 in hook() so we don't hold a reference.
2568 # This reduces memory consumption when there are multiple
2568 # This reduces memory consumption when there are multiple
2569 # transactions per lock. This can likely go away if issue5045
2569 # transactions per lock. This can likely go away if issue5045
2570 # fixes the function accumulation.
2570 # fixes the function accumulation.
2571 hookargs = tr2.hookargs
2571 hookargs = tr2.hookargs
2572
2572
2573 def hookfunc(unused_success):
2573 def hookfunc(unused_success):
2574 repo = reporef()
2574 repo = reporef()
2575 assert repo is not None # help pytype
2575 assert repo is not None # help pytype
2576
2576
2577 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2577 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2578 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2578 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2579 for name, (old, new) in bmchanges:
2579 for name, (old, new) in bmchanges:
2580 args = tr.hookargs.copy()
2580 args = tr.hookargs.copy()
2581 args.update(bookmarks.preparehookargs(name, old, new))
2581 args.update(bookmarks.preparehookargs(name, old, new))
2582 repo.hook(
2582 repo.hook(
2583 b'txnclose-bookmark',
2583 b'txnclose-bookmark',
2584 throw=False,
2584 throw=False,
2585 **pycompat.strkwargs(args)
2585 **pycompat.strkwargs(args)
2586 )
2586 )
2587
2587
2588 if hook.hashook(repo.ui, b'txnclose-phase'):
2588 if hook.hashook(repo.ui, b'txnclose-phase'):
2589 cl = repo.unfiltered().changelog
2589 cl = repo.unfiltered().changelog
2590 phasemv = sorted(
2590 phasemv = sorted(
2591 tr.changes[b'phases'], key=lambda r: r[0][0]
2591 tr.changes[b'phases'], key=lambda r: r[0][0]
2592 )
2592 )
2593 for revs, (old, new) in phasemv:
2593 for revs, (old, new) in phasemv:
2594 for rev in revs:
2594 for rev in revs:
2595 args = tr.hookargs.copy()
2595 args = tr.hookargs.copy()
2596 node = hex(cl.node(rev))
2596 node = hex(cl.node(rev))
2597 args.update(phases.preparehookargs(node, old, new))
2597 args.update(phases.preparehookargs(node, old, new))
2598 repo.hook(
2598 repo.hook(
2599 b'txnclose-phase',
2599 b'txnclose-phase',
2600 throw=False,
2600 throw=False,
2601 **pycompat.strkwargs(args)
2601 **pycompat.strkwargs(args)
2602 )
2602 )
2603
2603
2604 repo.hook(
2604 repo.hook(
2605 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2605 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2606 )
2606 )
2607
2607
2608 repo = reporef()
2608 repo = reporef()
2609 assert repo is not None # help pytype
2609 assert repo is not None # help pytype
2610 repo._afterlock(hookfunc)
2610 repo._afterlock(hookfunc)
2611
2611
2612 tr.addfinalize(b'txnclose-hook', txnclosehook)
2612 tr.addfinalize(b'txnclose-hook', txnclosehook)
2613 # Include a leading "-" to make it happen before the transaction summary
2613 # Include a leading "-" to make it happen before the transaction summary
2614 # reports registered via scmutil.registersummarycallback() whose names
2614 # reports registered via scmutil.registersummarycallback() whose names
2615 # are 00-txnreport etc. That way, the caches will be warm when the
2615 # are 00-txnreport etc. That way, the caches will be warm when the
2616 # callbacks run.
2616 # callbacks run.
2617 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2617 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2618
2618
2619 def txnaborthook(tr2):
2619 def txnaborthook(tr2):
2620 """To be run if transaction is aborted"""
2620 """To be run if transaction is aborted"""
2621 repo = reporef()
2621 repo = reporef()
2622 assert repo is not None # help pytype
2622 assert repo is not None # help pytype
2623 repo.hook(
2623 repo.hook(
2624 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2624 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2625 )
2625 )
2626
2626
2627 tr.addabort(b'txnabort-hook', txnaborthook)
2627 tr.addabort(b'txnabort-hook', txnaborthook)
2628 # avoid eager cache invalidation. in-memory data should be identical
2628 # avoid eager cache invalidation. in-memory data should be identical
2629 # to stored data if transaction has no error.
2629 # to stored data if transaction has no error.
2630 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2630 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2631 self._transref = weakref.ref(tr)
2631 self._transref = weakref.ref(tr)
2632 scmutil.registersummarycallback(self, tr, desc)
2632 scmutil.registersummarycallback(self, tr, desc)
2633 return tr
2633 return tr
2634
2634
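# [Editorial sketch, not from localrepo.py] The transaction built above wires
# its callbacks up by category name. Assuming, as the "-warm-cache" comment
# implies, that finalize/postclose callbacks run in lexicographic order of
# their category strings, a name starting with '-' sorts before the
# '00-txnreport' summary callbacks, so the caches are warm when the summaries
# print. Hypothetical illustration (each callback receives the transaction):
#
#     tr.addpostclose(b'-warm-cache', warm_caches)     # runs first
#     tr.addpostclose(b'00-txnreport', print_summary)  # runs afterwards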
2635 def _journalfiles(self):
2635 def _journalfiles(self):
2636 first = (
2636 first = (
2637 (self.svfs, b'journal'),
2637 (self.svfs, b'journal'),
2638 (self.svfs, b'journal.narrowspec'),
2638 (self.svfs, b'journal.narrowspec'),
2639 (self.vfs, b'journal.narrowspec.dirstate'),
2639 (self.vfs, b'journal.narrowspec.dirstate'),
2640 (self.vfs, b'journal.dirstate'),
2640 (self.vfs, b'journal.dirstate'),
2641 )
2641 )
2642 middle = []
2642 middle = []
2643 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2643 dirstate_data = self.dirstate.data_backup_filename(b'journal.dirstate')
2644 if dirstate_data is not None:
2644 if dirstate_data is not None:
2645 middle.append((self.vfs, dirstate_data))
2645 middle.append((self.vfs, dirstate_data))
2646 end = (
2646 end = (
2647 (self.vfs, b'journal.branch'),
2647 (self.vfs, b'journal.branch'),
2648 (self.vfs, b'journal.desc'),
2648 (self.vfs, b'journal.desc'),
2649 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2649 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2650 (self.svfs, b'journal.phaseroots'),
2650 (self.svfs, b'journal.phaseroots'),
2651 )
2651 )
2652 return first + tuple(middle) + end
2652 return first + tuple(middle) + end
2653
2653
2654 def undofiles(self):
2654 def undofiles(self):
2655 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2655 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2656
2656
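# [Editorial sketch, not from localrepo.py] undofiles() maps each journal
# backup to its post-transaction counterpart; undoname() is assumed to turn
# e.g. b'journal.dirstate' into b'undo.dirstate', matching the undo.bookmarks,
# undo.phaseroots and undo.branch files that _rollback() reads further down.
# Rough shape of the returned list:
#
#     [(repo.svfs, b'undo'), (repo.vfs, b'undo.dirstate'), ...]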
2657 @unfilteredmethod
2657 @unfilteredmethod
2658 def _writejournal(self, desc):
2658 def _writejournal(self, desc):
2659 if self.currentwlock() is not None:
2659 if self.currentwlock() is not None:
2660 self.dirstate.savebackup(None, b'journal.dirstate')
2660 self.dirstate.savebackup(None, b'journal.dirstate')
2661 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2661 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2662 narrowspec.savebackup(self, b'journal.narrowspec')
2662 narrowspec.savebackup(self, b'journal.narrowspec')
2663 self.vfs.write(
2663 self.vfs.write(
2664 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2664 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2665 )
2665 )
2666 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2666 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2667 bookmarksvfs = bookmarks.bookmarksvfs(self)
2667 bookmarksvfs = bookmarks.bookmarksvfs(self)
2668 bookmarksvfs.write(
2668 bookmarksvfs.write(
2669 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2669 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2670 )
2670 )
2671 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2671 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2672
2672
2673 def recover(self):
2673 def recover(self):
2674 with self.lock():
2674 with self.lock():
2675 if self.svfs.exists(b"journal"):
2675 if self.svfs.exists(b"journal"):
2676 self.ui.status(_(b"rolling back interrupted transaction\n"))
2676 self.ui.status(_(b"rolling back interrupted transaction\n"))
2677 vfsmap = {
2677 vfsmap = {
2678 b'': self.svfs,
2678 b'': self.svfs,
2679 b'plain': self.vfs,
2679 b'plain': self.vfs,
2680 }
2680 }
2681 transaction.rollback(
2681 transaction.rollback(
2682 self.svfs,
2682 self.svfs,
2683 vfsmap,
2683 vfsmap,
2684 b"journal",
2684 b"journal",
2685 self.ui.warn,
2685 self.ui.warn,
2686 checkambigfiles=_cachedfiles,
2686 checkambigfiles=_cachedfiles,
2687 )
2687 )
2688 self.invalidate()
2688 self.invalidate()
2689 return True
2689 return True
2690 else:
2690 else:
2691 self.ui.warn(_(b"no interrupted transaction available\n"))
2691 self.ui.warn(_(b"no interrupted transaction available\n"))
2692 return False
2692 return False
2693
2693
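# [Editorial sketch, not from localrepo.py] recover() above undoes an
# *interrupted* transaction from the 'journal' files, while rollback() below
# undoes the last *completed* transaction from the 'undo' files. Minimal
# caller-side sketch (only repo is required, the helper name is invented):
#
#     def cleanup_after_crash(repo):
#         if not repo.recover():  # True if an interrupted txn was rolled back
#             repo.ui.status(b'nothing to recover\n')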
2694 def rollback(self, dryrun=False, force=False):
2694 def rollback(self, dryrun=False, force=False):
2695 wlock = lock = dsguard = None
2695 wlock = lock = dsguard = None
2696 try:
2696 try:
2697 wlock = self.wlock()
2697 wlock = self.wlock()
2698 lock = self.lock()
2698 lock = self.lock()
2699 if self.svfs.exists(b"undo"):
2699 if self.svfs.exists(b"undo"):
2700 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2700 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2701
2701
2702 return self._rollback(dryrun, force, dsguard)
2702 return self._rollback(dryrun, force, dsguard)
2703 else:
2703 else:
2704 self.ui.warn(_(b"no rollback information available\n"))
2704 self.ui.warn(_(b"no rollback information available\n"))
2705 return 1
2705 return 1
2706 finally:
2706 finally:
2707 release(dsguard, lock, wlock)
2707 release(dsguard, lock, wlock)
2708
2708
2709 @unfilteredmethod # Until we get smarter cache management
2709 @unfilteredmethod # Until we get smarter cache management
2710 def _rollback(self, dryrun, force, dsguard):
2710 def _rollback(self, dryrun, force, dsguard):
2711 ui = self.ui
2711 ui = self.ui
2712
2713 parents = self.dirstate.parents()
2712 try:
2714 try:
2713 args = self.vfs.read(b'undo.desc').splitlines()
2715 args = self.vfs.read(b'undo.desc').splitlines()
2714 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2716 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2715 if len(args) >= 3:
2717 if len(args) >= 3:
2716 detail = args[2]
2718 detail = args[2]
2717 oldtip = oldlen - 1
2719 oldtip = oldlen - 1
2718
2720
2719 if detail and ui.verbose:
2721 if detail and ui.verbose:
2720 msg = _(
2722 msg = _(
2721 b'repository tip rolled back to revision %d'
2723 b'repository tip rolled back to revision %d'
2722 b' (undo %s: %s)\n'
2724 b' (undo %s: %s)\n'
2723 ) % (oldtip, desc, detail)
2725 ) % (oldtip, desc, detail)
2724 else:
2726 else:
2725 msg = _(
2727 msg = _(
2726 b'repository tip rolled back to revision %d (undo %s)\n'
2728 b'repository tip rolled back to revision %d (undo %s)\n'
2727 ) % (oldtip, desc)
2729 ) % (oldtip, desc)
2730 parentgone = any(self[p].rev() > oldtip for p in parents)
2728 except IOError:
2731 except IOError:
2729 msg = _(b'rolling back unknown transaction\n')
2732 msg = _(b'rolling back unknown transaction\n')
2730 desc = None
2733 desc = None
2734 parentgone = True
2731
2735
2732 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2736 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2733 raise error.Abort(
2737 raise error.Abort(
2734 _(
2738 _(
2735 b'rollback of last commit while not checked out '
2739 b'rollback of last commit while not checked out '
2736 b'may lose data'
2740 b'may lose data'
2737 ),
2741 ),
2738 hint=_(b'use -f to force'),
2742 hint=_(b'use -f to force'),
2739 )
2743 )
2740
2744
2741 ui.status(msg)
2745 ui.status(msg)
2742 if dryrun:
2746 if dryrun:
2743 return 0
2747 return 0
2744
2748
2745 parents = self.dirstate.parents()
2746 self.destroying()
2749 self.destroying()
2747 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2750 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2748 transaction.rollback(
2751 transaction.rollback(
2749 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2752 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2750 )
2753 )
2751 bookmarksvfs = bookmarks.bookmarksvfs(self)
2754 bookmarksvfs = bookmarks.bookmarksvfs(self)
2752 if bookmarksvfs.exists(b'undo.bookmarks'):
2755 if bookmarksvfs.exists(b'undo.bookmarks'):
2753 bookmarksvfs.rename(
2756 bookmarksvfs.rename(
2754 b'undo.bookmarks', b'bookmarks', checkambig=True
2757 b'undo.bookmarks', b'bookmarks', checkambig=True
2755 )
2758 )
2756 if self.svfs.exists(b'undo.phaseroots'):
2759 if self.svfs.exists(b'undo.phaseroots'):
2757 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2760 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2758 self.invalidate()
2761 self.invalidate()
2759
2762
2760 has_node = self.changelog.index.has_node
2761 parentgone = any(not has_node(p) for p in parents)
2762 if parentgone:
2763 if parentgone:
2763 # prevent dirstateguard from overwriting already restored one
2764 # prevent dirstateguard from overwriting already restored one
2764 dsguard.close()
2765 dsguard.close()
2765
2766
2766 narrowspec.restorebackup(self, b'undo.narrowspec')
2767 narrowspec.restorebackup(self, b'undo.narrowspec')
2767 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2768 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2768 self.dirstate.restorebackup(None, b'undo.dirstate')
2769 self.dirstate.restorebackup(None, b'undo.dirstate')
2769 try:
2770 try:
2770 branch = self.vfs.read(b'undo.branch')
2771 branch = self.vfs.read(b'undo.branch')
2771 self.dirstate.setbranch(encoding.tolocal(branch))
2772 self.dirstate.setbranch(encoding.tolocal(branch))
2772 except IOError:
2773 except IOError:
2773 ui.warn(
2774 ui.warn(
2774 _(
2775 _(
2775 b'named branch could not be reset: '
2776 b'named branch could not be reset: '
2776 b'current branch is still \'%s\'\n'
2777 b'current branch is still \'%s\'\n'
2777 )
2778 )
2778 % self.dirstate.branch()
2779 % self.dirstate.branch()
2779 )
2780 )
2780
2781
2781 parents = tuple([p.rev() for p in self[None].parents()])
2782 parents = tuple([p.rev() for p in self[None].parents()])
2782 if len(parents) > 1:
2783 if len(parents) > 1:
2783 ui.status(
2784 ui.status(
2784 _(
2785 _(
2785 b'working directory now based on '
2786 b'working directory now based on '
2786 b'revisions %d and %d\n'
2787 b'revisions %d and %d\n'
2787 )
2788 )
2788 % parents
2789 % parents
2789 )
2790 )
2790 else:
2791 else:
2791 ui.status(
2792 ui.status(
2792 _(b'working directory now based on revision %d\n') % parents
2793 _(b'working directory now based on revision %d\n') % parents
2793 )
2794 )
2794 mergestatemod.mergestate.clean(self)
2795 mergestatemod.mergestate.clean(self)
2795
2796
2796 # TODO: if we know which new heads may result from this rollback, pass
2797 # TODO: if we know which new heads may result from this rollback, pass
2797 # them to destroy(), which will prevent the branchhead cache from being
2798 # them to destroy(), which will prevent the branchhead cache from being
2798 # invalidated.
2799 # invalidated.
2799 self.destroyed()
2800 self.destroyed()
2800 return 0
2801 return 0
2801
2802
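# [Editorial sketch, not from localrepo.py] The change shown in this revision
# moves the "parentgone" detection earlier: instead of checking
# changelog.index.has_node() on the dirstate parents after the store has been
# rolled back, _rollback() now reads the old length from 'undo.desc' up front
# and flags a parent as gone when its revision is newer than the old tip, or
# when 'undo.desc' cannot be read at all. Condensed view of the new logic:
#
#     parents = repo.dirstate.parents()
#     try:
#         oldlen = int(repo.vfs.read(b'undo.desc').splitlines()[0])
#         parentgone = any(repo[p].rev() > oldlen - 1 for p in parents)
#     except IOError:
#         parentgone = True  # unknown transaction: assume the parents are gone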
2802 def _buildcacheupdater(self, newtransaction):
2803 def _buildcacheupdater(self, newtransaction):
2803 """called during transaction to build the callback updating cache
2804 """called during transaction to build the callback updating cache
2804
2805
2805 Lives on the repository to help extensions that might want to augment
2806 Lives on the repository to help extensions that might want to augment
2806 this logic. For this purpose, the created transaction is passed to the
2807 this logic. For this purpose, the created transaction is passed to the
2807 method.
2808 method.
2808 """
2809 """
2809 # we must avoid cyclic reference between repo and transaction.
2810 # we must avoid cyclic reference between repo and transaction.
2810 reporef = weakref.ref(self)
2811 reporef = weakref.ref(self)
2811
2812
2812 def updater(tr):
2813 def updater(tr):
2813 repo = reporef()
2814 repo = reporef()
2814 assert repo is not None # help pytype
2815 assert repo is not None # help pytype
2815 repo.updatecaches(tr)
2816 repo.updatecaches(tr)
2816
2817
2817 return updater
2818 return updater
2818
2819
2819 @unfilteredmethod
2820 @unfilteredmethod
2820 def updatecaches(self, tr=None, full=False, caches=None):
2821 def updatecaches(self, tr=None, full=False, caches=None):
2821 """warm appropriate caches
2822 """warm appropriate caches
2822
2823
2823 If this function is called after a transaction has closed, the transaction
2824 If this function is called after a transaction has closed, the transaction
2824 will be available in the 'tr' argument. This can be used to selectively
2825 will be available in the 'tr' argument. This can be used to selectively
2825 update caches relevant to the changes in that transaction.
2826 update caches relevant to the changes in that transaction.
2826
2827
2827 If 'full' is set, make sure all caches the function knows about have
2828 If 'full' is set, make sure all caches the function knows about have
2828 up-to-date data, even the ones usually loaded more lazily.
2829 up-to-date data, even the ones usually loaded more lazily.
2829
2830
2830 The `full` argument can take a special "post-clone" value. In this case
2831 The `full` argument can take a special "post-clone" value. In this case
2831 the cache warming is done after a clone and some of the slower caches might
2832 the cache warming is done after a clone and some of the slower caches might
2832 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2833 be skipped, namely the `.fnodetags` one. This argument is 5.8 specific
2833 as we plan for a cleaner way to deal with this for 5.9.
2834 as we plan for a cleaner way to deal with this for 5.9.
2834 """
2835 """
2835 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2836 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2836 # During strip, many caches are invalid but
2837 # During strip, many caches are invalid but
2837 # later call to `destroyed` will refresh them.
2838 # later call to `destroyed` will refresh them.
2838 return
2839 return
2839
2840
2840 unfi = self.unfiltered()
2841 unfi = self.unfiltered()
2841
2842
2842 if full:
2843 if full:
2843 msg = (
2844 msg = (
2844 "`full` argument for `repo.updatecaches` is deprecated\n"
2845 "`full` argument for `repo.updatecaches` is deprecated\n"
2845 "(use `caches=repository.CACHE_ALL` instead)"
2846 "(use `caches=repository.CACHE_ALL` instead)"
2846 )
2847 )
2847 self.ui.deprecwarn(msg, b"5.9")
2848 self.ui.deprecwarn(msg, b"5.9")
2848 caches = repository.CACHES_ALL
2849 caches = repository.CACHES_ALL
2849 if full == b"post-clone":
2850 if full == b"post-clone":
2850 caches = repository.CACHES_POST_CLONE
2851 caches = repository.CACHES_POST_CLONE
2852 elif caches is None:
2853 elif caches is None:
2853 caches = repository.CACHES_DEFAULT
2854 caches = repository.CACHES_DEFAULT
2854
2855
2855 if repository.CACHE_BRANCHMAP_SERVED in caches:
2856 if repository.CACHE_BRANCHMAP_SERVED in caches:
2856 if tr is None or tr.changes[b'origrepolen'] < len(self):
2857 if tr is None or tr.changes[b'origrepolen'] < len(self):
2857 # accessing the 'served' branchmap should refresh all the others,
2858 # accessing the 'served' branchmap should refresh all the others,
2858 self.ui.debug(b'updating the branch cache\n')
2859 self.ui.debug(b'updating the branch cache\n')
2859 self.filtered(b'served').branchmap()
2860 self.filtered(b'served').branchmap()
2860 self.filtered(b'served.hidden').branchmap()
2861 self.filtered(b'served.hidden').branchmap()
2861 # flush all possibly delayed write.
2862 # flush all possibly delayed write.
2862 self._branchcaches.write_delayed(self)
2863 self._branchcaches.write_delayed(self)
2863
2864
2864 if repository.CACHE_CHANGELOG_CACHE in caches:
2865 if repository.CACHE_CHANGELOG_CACHE in caches:
2865 self.changelog.update_caches(transaction=tr)
2866 self.changelog.update_caches(transaction=tr)
2866
2867
2867 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2868 if repository.CACHE_MANIFESTLOG_CACHE in caches:
2868 self.manifestlog.update_caches(transaction=tr)
2869 self.manifestlog.update_caches(transaction=tr)
2869
2870
2870 if repository.CACHE_REV_BRANCH in caches:
2871 if repository.CACHE_REV_BRANCH in caches:
2871 rbc = unfi.revbranchcache()
2872 rbc = unfi.revbranchcache()
2872 for r in unfi.changelog:
2873 for r in unfi.changelog:
2873 rbc.branchinfo(r)
2874 rbc.branchinfo(r)
2874 rbc.write()
2875 rbc.write()
2875
2876
2876 if repository.CACHE_FULL_MANIFEST in caches:
2877 if repository.CACHE_FULL_MANIFEST in caches:
2877 # ensure the working copy parents are in the manifestfulltextcache
2878 # ensure the working copy parents are in the manifestfulltextcache
2878 for ctx in self[b'.'].parents():
2879 for ctx in self[b'.'].parents():
2879 ctx.manifest() # accessing the manifest is enough
2880 ctx.manifest() # accessing the manifest is enough
2880
2881
2881 if repository.CACHE_FILE_NODE_TAGS in caches:
2882 if repository.CACHE_FILE_NODE_TAGS in caches:
2882 # accessing fnode cache warms the cache
2883 # accessing fnode cache warms the cache
2883 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2884 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2884
2885
2885 if repository.CACHE_TAGS_DEFAULT in caches:
2886 if repository.CACHE_TAGS_DEFAULT in caches:
2886 # accessing tags warms the cache
2887 # accessing tags warms the cache
2887 self.tags()
2888 self.tags()
2888 if repository.CACHE_TAGS_SERVED in caches:
2889 if repository.CACHE_TAGS_SERVED in caches:
2889 self.filtered(b'served').tags()
2890 self.filtered(b'served').tags()
2890
2891
2891 if repository.CACHE_BRANCHMAP_ALL in caches:
2892 if repository.CACHE_BRANCHMAP_ALL in caches:
2892 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2893 # The CACHE_BRANCHMAP_ALL updates lazily-loaded caches immediately,
2893 # so we're forcing a write to cause these caches to be warmed up
2894 # so we're forcing a write to cause these caches to be warmed up
2894 # even if they haven't explicitly been requested yet (if they've
2895 # even if they haven't explicitly been requested yet (if they've
2895 # never been used by hg, they won't ever have been written, even if
2896 # never been used by hg, they won't ever have been written, even if
2896 # they're a subset of another kind of cache that *has* been used).
2897 # they're a subset of another kind of cache that *has* been used).
2897 for filt in repoview.filtertable.keys():
2898 for filt in repoview.filtertable.keys():
2898 filtered = self.filtered(filt)
2899 filtered = self.filtered(filt)
2899 filtered.branchmap().write(filtered)
2900 filtered.branchmap().write(filtered)
2900
2901
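# [Editorial sketch, not from localrepo.py] Minimal caller-side sketch of
# warming caches explicitly with the constants used above (CACHES_DEFAULT,
# CACHES_ALL and CACHES_POST_CLONE live in mercurial.interfaces.repository):
#
#     from mercurial.interfaces import repository
#
#     repo.updatecaches(caches=repository.CACHES_DEFAULT)  # the usual warm-up
#     repo.updatecaches(caches=repository.CACHES_ALL)      # everything, slower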
2901 def invalidatecaches(self):
2902 def invalidatecaches(self):
2902 if '_tagscache' in vars(self):
2903 if '_tagscache' in vars(self):
2903 # can't use delattr on proxy
2904 # can't use delattr on proxy
2904 del self.__dict__['_tagscache']
2905 del self.__dict__['_tagscache']
2905
2906
2906 self._branchcaches.clear()
2907 self._branchcaches.clear()
2907 self.invalidatevolatilesets()
2908 self.invalidatevolatilesets()
2908 self._sparsesignaturecache.clear()
2909 self._sparsesignaturecache.clear()
2909
2910
2910 def invalidatevolatilesets(self):
2911 def invalidatevolatilesets(self):
2911 self.filteredrevcache.clear()
2912 self.filteredrevcache.clear()
2912 obsolete.clearobscaches(self)
2913 obsolete.clearobscaches(self)
2913 self._quick_access_changeid_invalidate()
2914 self._quick_access_changeid_invalidate()
2914
2915
2915 def invalidatedirstate(self):
2916 def invalidatedirstate(self):
2916 """Invalidates the dirstate, causing the next call to dirstate
2917 """Invalidates the dirstate, causing the next call to dirstate
2917 to check if it was modified since the last time it was read,
2918 to check if it was modified since the last time it was read,
2918 rereading it if it has.
2919 rereading it if it has.
2919
2920
2920 This is different from dirstate.invalidate() in that it doesn't always
2921 This is different from dirstate.invalidate() in that it doesn't always
2921 reread the dirstate. Use dirstate.invalidate() if you want to
2922 reread the dirstate. Use dirstate.invalidate() if you want to
2922 explicitly read the dirstate again (i.e. restoring it to a previous
2923 explicitly read the dirstate again (i.e. restoring it to a previous
2923 known good state)."""
2924 known good state)."""
2924 if hasunfilteredcache(self, 'dirstate'):
2925 if hasunfilteredcache(self, 'dirstate'):
2925 for k in self.dirstate._filecache:
2926 for k in self.dirstate._filecache:
2926 try:
2927 try:
2927 delattr(self.dirstate, k)
2928 delattr(self.dirstate, k)
2928 except AttributeError:
2929 except AttributeError:
2929 pass
2930 pass
2930 delattr(self.unfiltered(), 'dirstate')
2931 delattr(self.unfiltered(), 'dirstate')
2931
2932
2932 def invalidate(self, clearfilecache=False):
2933 def invalidate(self, clearfilecache=False):
2933 """Invalidates both store and non-store parts other than dirstate
2934 """Invalidates both store and non-store parts other than dirstate
2934
2935
2935 If a transaction is running, invalidation of store is omitted,
2936 If a transaction is running, invalidation of store is omitted,
2936 because discarding in-memory changes might cause inconsistency
2937 because discarding in-memory changes might cause inconsistency
2937 (e.g. an incomplete fncache causes unintentional failure, but
2938 (e.g. an incomplete fncache causes unintentional failure, but
2938 a redundant one doesn't).
2939 a redundant one doesn't).
2939 """
2940 """
2940 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2941 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2941 for k in list(self._filecache.keys()):
2942 for k in list(self._filecache.keys()):
2942 # dirstate is invalidated separately in invalidatedirstate()
2943 # dirstate is invalidated separately in invalidatedirstate()
2943 if k == b'dirstate':
2944 if k == b'dirstate':
2944 continue
2945 continue
2945 if (
2946 if (
2946 k == b'changelog'
2947 k == b'changelog'
2947 and self.currenttransaction()
2948 and self.currenttransaction()
2948 and self.changelog._delayed
2949 and self.changelog._delayed
2949 ):
2950 ):
2950 # The changelog object may store unwritten revisions. We don't
2951 # The changelog object may store unwritten revisions. We don't
2951 # want to lose them.
2952 # want to lose them.
2952 # TODO: Solve the problem instead of working around it.
2953 # TODO: Solve the problem instead of working around it.
2953 continue
2954 continue
2954
2955
2955 if clearfilecache:
2956 if clearfilecache:
2956 del self._filecache[k]
2957 del self._filecache[k]
2957 try:
2958 try:
2958 delattr(unfiltered, k)
2959 delattr(unfiltered, k)
2959 except AttributeError:
2960 except AttributeError:
2960 pass
2961 pass
2961 self.invalidatecaches()
2962 self.invalidatecaches()
2962 if not self.currenttransaction():
2963 if not self.currenttransaction():
2963 # TODO: Changing contents of store outside transaction
2964 # TODO: Changing contents of store outside transaction
2964 # causes inconsistency. We should make in-memory store
2965 # causes inconsistency. We should make in-memory store
2965 # changes detectable, and abort if changed.
2966 # changes detectable, and abort if changed.
2966 self.store.invalidatecaches()
2967 self.store.invalidatecaches()
2967
2968
2968 def invalidateall(self):
2969 def invalidateall(self):
2969 """Fully invalidates both store and non-store parts, causing the
2970 """Fully invalidates both store and non-store parts, causing the
2970 subsequent operation to reread any outside changes."""
2971 subsequent operation to reread any outside changes."""
2971 # extension should hook this to invalidate its caches
2972 # extension should hook this to invalidate its caches
2972 self.invalidate()
2973 self.invalidate()
2973 self.invalidatedirstate()
2974 self.invalidatedirstate()
2974
2975
2975 @unfilteredmethod
2976 @unfilteredmethod
2976 def _refreshfilecachestats(self, tr):
2977 def _refreshfilecachestats(self, tr):
2977 """Reload stats of cached files so that they are flagged as valid"""
2978 """Reload stats of cached files so that they are flagged as valid"""
2978 for k, ce in self._filecache.items():
2979 for k, ce in self._filecache.items():
2979 k = pycompat.sysstr(k)
2980 k = pycompat.sysstr(k)
2980 if k == 'dirstate' or k not in self.__dict__:
2981 if k == 'dirstate' or k not in self.__dict__:
2981 continue
2982 continue
2982 ce.refresh()
2983 ce.refresh()
2983
2984
2984 def _lock(
2985 def _lock(
2985 self,
2986 self,
2986 vfs,
2987 vfs,
2987 lockname,
2988 lockname,
2988 wait,
2989 wait,
2989 releasefn,
2990 releasefn,
2990 acquirefn,
2991 acquirefn,
2991 desc,
2992 desc,
2992 ):
2993 ):
2993 timeout = 0
2994 timeout = 0
2994 warntimeout = 0
2995 warntimeout = 0
2995 if wait:
2996 if wait:
2996 timeout = self.ui.configint(b"ui", b"timeout")
2997 timeout = self.ui.configint(b"ui", b"timeout")
2997 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2998 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2998 # internal config: ui.signal-safe-lock
2999 # internal config: ui.signal-safe-lock
2999 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3000 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
3000
3001
3001 l = lockmod.trylock(
3002 l = lockmod.trylock(
3002 self.ui,
3003 self.ui,
3003 vfs,
3004 vfs,
3004 lockname,
3005 lockname,
3005 timeout,
3006 timeout,
3006 warntimeout,
3007 warntimeout,
3007 releasefn=releasefn,
3008 releasefn=releasefn,
3008 acquirefn=acquirefn,
3009 acquirefn=acquirefn,
3009 desc=desc,
3010 desc=desc,
3010 signalsafe=signalsafe,
3011 signalsafe=signalsafe,
3011 )
3012 )
3012 return l
3013 return l
3013
3014
3014 def _afterlock(self, callback):
3015 def _afterlock(self, callback):
3015 """add a callback to be run when the repository is fully unlocked
3016 """add a callback to be run when the repository is fully unlocked
3016
3017
3017 The callback will be executed when the outermost lock is released
3018 The callback will be executed when the outermost lock is released
3018 (with wlock being higher level than 'lock')."""
3019 (with wlock being higher level than 'lock')."""
3019 for ref in (self._wlockref, self._lockref):
3020 for ref in (self._wlockref, self._lockref):
3020 l = ref and ref()
3021 l = ref and ref()
3021 if l and l.held:
3022 if l and l.held:
3022 l.postrelease.append(callback)
3023 l.postrelease.append(callback)
3023 break
3024 break
3024 else: # no lock has been found.
3025 else: # no lock has been found.
3025 callback(True)
3026 callback(True)
3026
3027
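# [Editorial sketch, not from localrepo.py] _afterlock() is how commit() and
# pushkey() below defer their hooks until the outermost lock is released; the
# callback receives a single success flag and runs immediately when no lock is
# currently held. Minimal sketch (the callback name is invented):
#
#     def notify(lock_released_ok):
#         repo.ui.debug(b'all repository locks released\n')
#
#     repo._afterlock(notify)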
3027 def lock(self, wait=True):
3028 def lock(self, wait=True):
3028 """Lock the repository store (.hg/store) and return a weak reference
3029 """Lock the repository store (.hg/store) and return a weak reference
3029 to the lock. Use this before modifying the store (e.g. committing or
3030 to the lock. Use this before modifying the store (e.g. committing or
3030 stripping). If you are opening a transaction, get a lock as well.
3031 stripping). If you are opening a transaction, get a lock as well.
3031
3032
3032 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3033 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3033 'wlock' first to avoid a dead-lock hazard."""
3034 'wlock' first to avoid a dead-lock hazard."""
3034 l = self._currentlock(self._lockref)
3035 l = self._currentlock(self._lockref)
3035 if l is not None:
3036 if l is not None:
3036 l.lock()
3037 l.lock()
3037 return l
3038 return l
3038
3039
3039 l = self._lock(
3040 l = self._lock(
3040 vfs=self.svfs,
3041 vfs=self.svfs,
3041 lockname=b"lock",
3042 lockname=b"lock",
3042 wait=wait,
3043 wait=wait,
3043 releasefn=None,
3044 releasefn=None,
3044 acquirefn=self.invalidate,
3045 acquirefn=self.invalidate,
3045 desc=_(b'repository %s') % self.origroot,
3046 desc=_(b'repository %s') % self.origroot,
3046 )
3047 )
3047 self._lockref = weakref.ref(l)
3048 self._lockref = weakref.ref(l)
3048 return l
3049 return l
3049
3050
3050 def wlock(self, wait=True):
3051 def wlock(self, wait=True):
3051 """Lock the non-store parts of the repository (everything under
3052 """Lock the non-store parts of the repository (everything under
3052 .hg except .hg/store) and return a weak reference to the lock.
3053 .hg except .hg/store) and return a weak reference to the lock.
3053
3054
3054 Use this before modifying files in .hg.
3055 Use this before modifying files in .hg.
3055
3056
3056 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3057 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
3057 'wlock' first to avoid a dead-lock hazard."""
3058 'wlock' first to avoid a dead-lock hazard."""
3058 l = self._wlockref() if self._wlockref else None
3059 l = self._wlockref() if self._wlockref else None
3059 if l is not None and l.held:
3060 if l is not None and l.held:
3060 l.lock()
3061 l.lock()
3061 return l
3062 return l
3062
3063
3063 # We do not need to check for non-waiting lock acquisition. Such
3064 # We do not need to check for non-waiting lock acquisition. Such
3064 # acquisition would not cause a dead-lock as it would just fail.
3065 # acquisition would not cause a dead-lock as it would just fail.
3065 if wait and (
3066 if wait and (
3066 self.ui.configbool(b'devel', b'all-warnings')
3067 self.ui.configbool(b'devel', b'all-warnings')
3067 or self.ui.configbool(b'devel', b'check-locks')
3068 or self.ui.configbool(b'devel', b'check-locks')
3068 ):
3069 ):
3069 if self._currentlock(self._lockref) is not None:
3070 if self._currentlock(self._lockref) is not None:
3070 self.ui.develwarn(b'"wlock" acquired after "lock"')
3071 self.ui.develwarn(b'"wlock" acquired after "lock"')
3071
3072
3072 def unlock():
3073 def unlock():
3073 if self.dirstate.is_changing_any:
3074 if self.dirstate.is_changing_any:
3074 msg = b"wlock release in the middle of a changing parents"
3075 msg = b"wlock release in the middle of a changing parents"
3075 self.ui.develwarn(msg)
3076 self.ui.develwarn(msg)
3076 self.dirstate.invalidate()
3077 self.dirstate.invalidate()
3077 else:
3078 else:
3078 if self.dirstate._dirty:
3079 if self.dirstate._dirty:
3079 msg = b"dirty dirstate on wlock release"
3080 msg = b"dirty dirstate on wlock release"
3080 self.ui.develwarn(msg)
3081 self.ui.develwarn(msg)
3081 self.dirstate.write(None)
3082 self.dirstate.write(None)
3082
3083
3083 self._filecache[b'dirstate'].refresh()
3084 self._filecache[b'dirstate'].refresh()
3084
3085
3085 l = self._lock(
3086 l = self._lock(
3086 self.vfs,
3087 self.vfs,
3087 b"wlock",
3088 b"wlock",
3088 wait,
3089 wait,
3089 unlock,
3090 unlock,
3090 self.invalidatedirstate,
3091 self.invalidatedirstate,
3091 _(b'working directory of %s') % self.origroot,
3092 _(b'working directory of %s') % self.origroot,
3092 )
3093 )
3093 self._wlockref = weakref.ref(l)
3094 self._wlockref = weakref.ref(l)
3094 return l
3095 return l
3095
3096
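# [Editorial sketch, not from localrepo.py] The ordering rule documented in
# both docstrings above: when both locks are needed, take 'wlock' before
# 'lock', as commit() below does. Minimal sketch:
#
#     with repo.wlock(), repo.lock():
#         ...  # safe to touch both the working copy and the store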
3096 def _currentlock(self, lockref):
3097 def _currentlock(self, lockref):
3097 """Returns the lock if it's held, or None if it's not."""
3098 """Returns the lock if it's held, or None if it's not."""
3098 if lockref is None:
3099 if lockref is None:
3099 return None
3100 return None
3100 l = lockref()
3101 l = lockref()
3101 if l is None or not l.held:
3102 if l is None or not l.held:
3102 return None
3103 return None
3103 return l
3104 return l
3104
3105
3105 def currentwlock(self):
3106 def currentwlock(self):
3106 """Returns the wlock if it's held, or None if it's not."""
3107 """Returns the wlock if it's held, or None if it's not."""
3107 return self._currentlock(self._wlockref)
3108 return self._currentlock(self._wlockref)
3108
3109
3109 def checkcommitpatterns(self, wctx, match, status, fail):
3110 def checkcommitpatterns(self, wctx, match, status, fail):
3110 """check for commit arguments that aren't committable"""
3111 """check for commit arguments that aren't committable"""
3111 if match.isexact() or match.prefix():
3112 if match.isexact() or match.prefix():
3112 matched = set(status.modified + status.added + status.removed)
3113 matched = set(status.modified + status.added + status.removed)
3113
3114
3114 for f in match.files():
3115 for f in match.files():
3115 f = self.dirstate.normalize(f)
3116 f = self.dirstate.normalize(f)
3116 if f == b'.' or f in matched or f in wctx.substate:
3117 if f == b'.' or f in matched or f in wctx.substate:
3117 continue
3118 continue
3118 if f in status.deleted:
3119 if f in status.deleted:
3119 fail(f, _(b'file not found!'))
3120 fail(f, _(b'file not found!'))
3120 # Is it a directory that exists or used to exist?
3121 # Is it a directory that exists or used to exist?
3121 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3122 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
3122 d = f + b'/'
3123 d = f + b'/'
3123 for mf in matched:
3124 for mf in matched:
3124 if mf.startswith(d):
3125 if mf.startswith(d):
3125 break
3126 break
3126 else:
3127 else:
3127 fail(f, _(b"no match under directory!"))
3128 fail(f, _(b"no match under directory!"))
3128 elif f not in self.dirstate:
3129 elif f not in self.dirstate:
3129 fail(f, _(b"file not tracked!"))
3130 fail(f, _(b"file not tracked!"))
3130
3131
3131 @unfilteredmethod
3132 @unfilteredmethod
3132 def commit(
3133 def commit(
3133 self,
3134 self,
3134 text=b"",
3135 text=b"",
3135 user=None,
3136 user=None,
3136 date=None,
3137 date=None,
3137 match=None,
3138 match=None,
3138 force=False,
3139 force=False,
3139 editor=None,
3140 editor=None,
3140 extra=None,
3141 extra=None,
3141 ):
3142 ):
3142 """Add a new revision to current repository.
3143 """Add a new revision to current repository.
3143
3144
3144 Revision information is gathered from the working directory,
3145 Revision information is gathered from the working directory,
3145 match can be used to filter the committed files. If editor is
3146 match can be used to filter the committed files. If editor is
3146 supplied, it is called to get a commit message.
3147 supplied, it is called to get a commit message.
3147 """
3148 """
3148 if extra is None:
3149 if extra is None:
3149 extra = {}
3150 extra = {}
3150
3151
3151 def fail(f, msg):
3152 def fail(f, msg):
3152 raise error.InputError(b'%s: %s' % (f, msg))
3153 raise error.InputError(b'%s: %s' % (f, msg))
3153
3154
3154 if not match:
3155 if not match:
3155 match = matchmod.always()
3156 match = matchmod.always()
3156
3157
3157 if not force:
3158 if not force:
3158 match.bad = fail
3159 match.bad = fail
3159
3160
3160 # lock() for recent changelog (see issue4368)
3161 # lock() for recent changelog (see issue4368)
3161 with self.wlock(), self.lock():
3162 with self.wlock(), self.lock():
3162 wctx = self[None]
3163 wctx = self[None]
3163 merge = len(wctx.parents()) > 1
3164 merge = len(wctx.parents()) > 1
3164
3165
3165 if not force and merge and not match.always():
3166 if not force and merge and not match.always():
3166 raise error.Abort(
3167 raise error.Abort(
3167 _(
3168 _(
3168 b'cannot partially commit a merge '
3169 b'cannot partially commit a merge '
3169 b'(do not specify files or patterns)'
3170 b'(do not specify files or patterns)'
3170 )
3171 )
3171 )
3172 )
3172
3173
3173 status = self.status(match=match, clean=force)
3174 status = self.status(match=match, clean=force)
3174 if force:
3175 if force:
3175 status.modified.extend(
3176 status.modified.extend(
3176 status.clean
3177 status.clean
3177 ) # mq may commit clean files
3178 ) # mq may commit clean files
3178
3179
3179 # check subrepos
3180 # check subrepos
3180 subs, commitsubs, newstate = subrepoutil.precommit(
3181 subs, commitsubs, newstate = subrepoutil.precommit(
3181 self.ui, wctx, status, match, force=force
3182 self.ui, wctx, status, match, force=force
3182 )
3183 )
3183
3184
3184 # make sure all explicit patterns are matched
3185 # make sure all explicit patterns are matched
3185 if not force:
3186 if not force:
3186 self.checkcommitpatterns(wctx, match, status, fail)
3187 self.checkcommitpatterns(wctx, match, status, fail)
3187
3188
3188 cctx = context.workingcommitctx(
3189 cctx = context.workingcommitctx(
3189 self, status, text, user, date, extra
3190 self, status, text, user, date, extra
3190 )
3191 )
3191
3192
3192 ms = mergestatemod.mergestate.read(self)
3193 ms = mergestatemod.mergestate.read(self)
3193 mergeutil.checkunresolved(ms)
3194 mergeutil.checkunresolved(ms)
3194
3195
3195 # internal config: ui.allowemptycommit
3196 # internal config: ui.allowemptycommit
3196 if cctx.isempty() and not self.ui.configbool(
3197 if cctx.isempty() and not self.ui.configbool(
3197 b'ui', b'allowemptycommit'
3198 b'ui', b'allowemptycommit'
3198 ):
3199 ):
3199 self.ui.debug(b'nothing to commit, clearing merge state\n')
3200 self.ui.debug(b'nothing to commit, clearing merge state\n')
3200 ms.reset()
3201 ms.reset()
3201 return None
3202 return None
3202
3203
3203 if merge and cctx.deleted():
3204 if merge and cctx.deleted():
3204 raise error.Abort(_(b"cannot commit merge with missing files"))
3205 raise error.Abort(_(b"cannot commit merge with missing files"))
3205
3206
3206 if editor:
3207 if editor:
3207 cctx._text = editor(self, cctx, subs)
3208 cctx._text = editor(self, cctx, subs)
3208 edited = text != cctx._text
3209 edited = text != cctx._text
3209
3210
3210 # Save commit message in case this transaction gets rolled back
3211 # Save commit message in case this transaction gets rolled back
3211 # (e.g. by a pretxncommit hook). Leave the content alone on
3212 # (e.g. by a pretxncommit hook). Leave the content alone on
3212 # the assumption that the user will use the same editor again.
3213 # the assumption that the user will use the same editor again.
3213 msg_path = self.savecommitmessage(cctx._text)
3214 msg_path = self.savecommitmessage(cctx._text)
3214
3215
3215 # commit subs and write new state
3216 # commit subs and write new state
3216 if subs:
3217 if subs:
3217 uipathfn = scmutil.getuipathfn(self)
3218 uipathfn = scmutil.getuipathfn(self)
3218 for s in sorted(commitsubs):
3219 for s in sorted(commitsubs):
3219 sub = wctx.sub(s)
3220 sub = wctx.sub(s)
3220 self.ui.status(
3221 self.ui.status(
3221 _(b'committing subrepository %s\n')
3222 _(b'committing subrepository %s\n')
3222 % uipathfn(subrepoutil.subrelpath(sub))
3223 % uipathfn(subrepoutil.subrelpath(sub))
3223 )
3224 )
3224 sr = sub.commit(cctx._text, user, date)
3225 sr = sub.commit(cctx._text, user, date)
3225 newstate[s] = (newstate[s][0], sr)
3226 newstate[s] = (newstate[s][0], sr)
3226 subrepoutil.writestate(self, newstate)
3227 subrepoutil.writestate(self, newstate)
3227
3228
3228 p1, p2 = self.dirstate.parents()
3229 p1, p2 = self.dirstate.parents()
3229 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3230 hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
3230 try:
3231 try:
3231 self.hook(
3232 self.hook(
3232 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3233 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
3233 )
3234 )
3234 with self.transaction(b'commit'):
3235 with self.transaction(b'commit'):
3235 ret = self.commitctx(cctx, True)
3236 ret = self.commitctx(cctx, True)
3236 # update bookmarks, dirstate and mergestate
3237 # update bookmarks, dirstate and mergestate
3237 bookmarks.update(self, [p1, p2], ret)
3238 bookmarks.update(self, [p1, p2], ret)
3238 cctx.markcommitted(ret)
3239 cctx.markcommitted(ret)
3239 ms.reset()
3240 ms.reset()
3240 except: # re-raises
3241 except: # re-raises
3241 if edited:
3242 if edited:
3242 self.ui.write(
3243 self.ui.write(
3243 _(b'note: commit message saved in %s\n') % msg_path
3244 _(b'note: commit message saved in %s\n') % msg_path
3244 )
3245 )
3245 self.ui.write(
3246 self.ui.write(
3246 _(
3247 _(
3247 b"note: use 'hg commit --logfile "
3248 b"note: use 'hg commit --logfile "
3248 b"%s --edit' to reuse it\n"
3249 b"%s --edit' to reuse it\n"
3249 )
3250 )
3250 % msg_path
3251 % msg_path
3251 )
3252 )
3252 raise
3253 raise
3253
3254
3254 def commithook(unused_success):
3255 def commithook(unused_success):
3255 # hack for commands that use a temporary commit (e.g. histedit):
3256 # hack for commands that use a temporary commit (e.g. histedit):
3256 # the temporary commit may have been stripped before the hook runs
3257 # the temporary commit may have been stripped before the hook runs
3257 if self.changelog.hasnode(ret):
3258 if self.changelog.hasnode(ret):
3258 self.hook(
3259 self.hook(
3259 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3260 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
3260 )
3261 )
3261
3262
3262 self._afterlock(commithook)
3263 self._afterlock(commithook)
3263 return ret
3264 return ret
3264
3265
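# [Editorial sketch, not from localrepo.py] Minimal caller-side sketch of
# commit(): it takes its own wlock/lock, fires the precommit hook, writes the
# changeset inside a b'commit' transaction, and returns the new node, or None
# when there is nothing to commit and ui.allowemptycommit is off:
#
#     node = repo.commit(text=b'fix frobnication', user=b'alice <a@example.org>')
#     if node is None:
#         repo.ui.status(b'nothing changed\n')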
3265 @unfilteredmethod
3266 @unfilteredmethod
3266 def commitctx(self, ctx, error=False, origctx=None):
3267 def commitctx(self, ctx, error=False, origctx=None):
3267 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3268 return commit.commitctx(self, ctx, error=error, origctx=origctx)
3268
3269
3269 @unfilteredmethod
3270 @unfilteredmethod
3270 def destroying(self):
3271 def destroying(self):
3271 """Inform the repository that nodes are about to be destroyed.
3272 """Inform the repository that nodes are about to be destroyed.
3272 Intended for use by strip and rollback, so there's a common
3273 Intended for use by strip and rollback, so there's a common
3273 place for anything that has to be done before destroying history.
3274 place for anything that has to be done before destroying history.
3274
3275
3275 This is mostly useful for saving state that is in memory and waiting
3276 This is mostly useful for saving state that is in memory and waiting
3276 to be flushed when the current lock is released. Because a call to
3277 to be flushed when the current lock is released. Because a call to
3277 destroyed is imminent, the repo will be invalidated causing those
3278 destroyed is imminent, the repo will be invalidated causing those
3278 changes to stay in memory (waiting for the next unlock), or vanish
3279 changes to stay in memory (waiting for the next unlock), or vanish
3279 completely.
3280 completely.
3280 """
3281 """
3281 # When using the same lock to commit and strip, the phasecache is left
3282 # When using the same lock to commit and strip, the phasecache is left
3282 # dirty after committing. Then when we strip, the repo is invalidated,
3283 # dirty after committing. Then when we strip, the repo is invalidated,
3283 # causing those changes to disappear.
3284 # causing those changes to disappear.
3284 if '_phasecache' in vars(self):
3285 if '_phasecache' in vars(self):
3285 self._phasecache.write()
3286 self._phasecache.write()
3286
3287
3287 @unfilteredmethod
3288 @unfilteredmethod
3288 def destroyed(self):
3289 def destroyed(self):
3289 """Inform the repository that nodes have been destroyed.
3290 """Inform the repository that nodes have been destroyed.
3290 Intended for use by strip and rollback, so there's a common
3291 Intended for use by strip and rollback, so there's a common
3291 place for anything that has to be done after destroying history.
3292 place for anything that has to be done after destroying history.
3292 """
3293 """
3293 # When one tries to:
3294 # When one tries to:
3294 # 1) destroy nodes thus calling this method (e.g. strip)
3295 # 1) destroy nodes thus calling this method (e.g. strip)
3295 # 2) use phasecache somewhere (e.g. commit)
3296 # 2) use phasecache somewhere (e.g. commit)
3296 #
3297 #
3297 # then 2) will fail because the phasecache contains nodes that were
3298 # then 2) will fail because the phasecache contains nodes that were
3298 # removed. We can either remove phasecache from the filecache,
3299 # removed. We can either remove phasecache from the filecache,
3299 # causing it to reload next time it is accessed, or simply filter
3300 # causing it to reload next time it is accessed, or simply filter
3300 # the removed nodes now and write the updated cache.
3301 # the removed nodes now and write the updated cache.
3301 self._phasecache.filterunknown(self)
3302 self._phasecache.filterunknown(self)
3302 self._phasecache.write()
3303 self._phasecache.write()
3303
3304
3304 # refresh all repository caches
3305 # refresh all repository caches
3305 self.updatecaches()
3306 self.updatecaches()
3306
3307
3307 # Ensure the persistent tag cache is updated. Doing it now
3308 # Ensure the persistent tag cache is updated. Doing it now
3308 # means that the tag cache only has to worry about destroyed
3309 # means that the tag cache only has to worry about destroyed
3309 # heads immediately after a strip/rollback. That in turn
3310 # heads immediately after a strip/rollback. That in turn
3310 # guarantees that "cachetip == currenttip" (comparing both rev
3311 # guarantees that "cachetip == currenttip" (comparing both rev
3311 # and node) always means no nodes have been added or destroyed.
3312 # and node) always means no nodes have been added or destroyed.
3312
3313
3313 # XXX this is suboptimal when qrefresh'ing: we strip the current
3314 # XXX this is suboptimal when qrefresh'ing: we strip the current
3314 # head, refresh the tag cache, then immediately add a new head.
3315 # head, refresh the tag cache, then immediately add a new head.
3315 # But I think doing it this way is necessary for the "instant
3316 # But I think doing it this way is necessary for the "instant
3316 # tag cache retrieval" case to work.
3317 # tag cache retrieval" case to work.
3317 self.invalidate()
3318 self.invalidate()
3318
3319
3319 def status(
3320 def status(
3320 self,
3321 self,
3321 node1=b'.',
3322 node1=b'.',
3322 node2=None,
3323 node2=None,
3323 match=None,
3324 match=None,
3324 ignored=False,
3325 ignored=False,
3325 clean=False,
3326 clean=False,
3326 unknown=False,
3327 unknown=False,
3327 listsubrepos=False,
3328 listsubrepos=False,
3328 ):
3329 ):
3329 '''a convenience method that calls node1.status(node2)'''
3330 '''a convenience method that calls node1.status(node2)'''
3330 return self[node1].status(
3331 return self[node1].status(
3331 node2, match, ignored, clean, unknown, listsubrepos
3332 node2, match, ignored, clean, unknown, listsubrepos
3332 )
3333 )
3333
3334
3334 def addpostdsstatus(self, ps):
3335 def addpostdsstatus(self, ps):
3335 """Add a callback to run within the wlock, at the point at which status
3336 """Add a callback to run within the wlock, at the point at which status
3336 fixups happen.
3337 fixups happen.
3337
3338
3338 On status completion, callback(wctx, status) will be called with the
3339 On status completion, callback(wctx, status) will be called with the
3339 wlock held, unless the dirstate has changed from underneath or the wlock
3340 wlock held, unless the dirstate has changed from underneath or the wlock
3340 couldn't be grabbed.
3341 couldn't be grabbed.
3341
3342
3342 Callbacks should not capture and use a cached copy of the dirstate --
3343 Callbacks should not capture and use a cached copy of the dirstate --
3343 it might change in the meantime. Instead, they should access the
3344 it might change in the meantime. Instead, they should access the
3344 dirstate via wctx.repo().dirstate.
3345 dirstate via wctx.repo().dirstate.
3345
3346
3346 This list is emptied out after each status run -- extensions should
3347 This list is emptied out after each status run -- extensions should
3347 make sure they add to this list each time dirstate.status is called.
3348 make sure they add to this list each time dirstate.status is called.
3348 Extensions should also make sure they don't call this for statuses
3349 Extensions should also make sure they don't call this for statuses
3349 that don't involve the dirstate.
3350 that don't involve the dirstate.
3350 """
3351 """
3351
3352
3352 # The list is located here for uniqueness reasons -- it is actually
3353 # The list is located here for uniqueness reasons -- it is actually
3353 # managed by the workingctx, but that isn't unique per-repo.
3354 # managed by the workingctx, but that isn't unique per-repo.
3354 self._postdsstatus.append(ps)
3355 self._postdsstatus.append(ps)
3355
3356
3356 def postdsstatus(self):
3357 def postdsstatus(self):
3357 """Used by workingctx to get the list of post-dirstate-status hooks."""
3358 """Used by workingctx to get the list of post-dirstate-status hooks."""
3358 return self._postdsstatus
3359 return self._postdsstatus
3359
3360
3360 def clearpostdsstatus(self):
3361 def clearpostdsstatus(self):
3361 """Used by workingctx to clear post-dirstate-status hooks."""
3362 """Used by workingctx to clear post-dirstate-status hooks."""
3362 del self._postdsstatus[:]
3363 del self._postdsstatus[:]
3363
3364
3364 def heads(self, start=None):
3365 def heads(self, start=None):
3365 if start is None:
3366 if start is None:
3366 cl = self.changelog
3367 cl = self.changelog
3367 headrevs = reversed(cl.headrevs())
3368 headrevs = reversed(cl.headrevs())
3368 return [cl.node(rev) for rev in headrevs]
3369 return [cl.node(rev) for rev in headrevs]
3369
3370
3370 heads = self.changelog.heads(start)
3371 heads = self.changelog.heads(start)
3371 # sort the output in rev descending order
3372 # sort the output in rev descending order
3372 return sorted(heads, key=self.changelog.rev, reverse=True)
3373 return sorted(heads, key=self.changelog.rev, reverse=True)
3373
3374
3374 def branchheads(self, branch=None, start=None, closed=False):
3375 def branchheads(self, branch=None, start=None, closed=False):
3375 """return a (possibly filtered) list of heads for the given branch
3376 """return a (possibly filtered) list of heads for the given branch
3376
3377
3377 Heads are returned in topological order, from newest to oldest.
3378 Heads are returned in topological order, from newest to oldest.
3378 If branch is None, use the dirstate branch.
3379 If branch is None, use the dirstate branch.
3379 If start is not None, return only heads reachable from start.
3380 If start is not None, return only heads reachable from start.
3380 If closed is True, return heads that are marked as closed as well.
3381 If closed is True, return heads that are marked as closed as well.
3381 """
3382 """
3382 if branch is None:
3383 if branch is None:
3383 branch = self[None].branch()
3384 branch = self[None].branch()
3384 branches = self.branchmap()
3385 branches = self.branchmap()
3385 if not branches.hasbranch(branch):
3386 if not branches.hasbranch(branch):
3386 return []
3387 return []
3387 # the cache returns heads ordered lowest to highest
3388 # the cache returns heads ordered lowest to highest
3388 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3389 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3389 if start is not None:
3390 if start is not None:
3390 # filter out the heads that cannot be reached from startrev
3391 # filter out the heads that cannot be reached from startrev
3391 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3392 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3392 bheads = [h for h in bheads if h in fbheads]
3393 bheads = [h for h in bheads if h in fbheads]
3393 return bheads
3394 return bheads
3394
3395
3395 def branches(self, nodes):
3396 def branches(self, nodes):
3396 if not nodes:
3397 if not nodes:
3397 nodes = [self.changelog.tip()]
3398 nodes = [self.changelog.tip()]
3398 b = []
3399 b = []
3399 for n in nodes:
3400 for n in nodes:
3400 t = n
3401 t = n
3401 while True:
3402 while True:
3402 p = self.changelog.parents(n)
3403 p = self.changelog.parents(n)
3403 if p[1] != self.nullid or p[0] == self.nullid:
3404 if p[1] != self.nullid or p[0] == self.nullid:
3404 b.append((t, n, p[0], p[1]))
3405 b.append((t, n, p[0], p[1]))
3405 break
3406 break
3406 n = p[0]
3407 n = p[0]
3407 return b
3408 return b
3408
3409
3409 def between(self, pairs):
3410 def between(self, pairs):
3410 r = []
3411 r = []
3411
3412
3412 for top, bottom in pairs:
3413 for top, bottom in pairs:
3413 n, l, i = top, [], 0
3414 n, l, i = top, [], 0
3414 f = 1
3415 f = 1
3415
3416
3416 while n != bottom and n != self.nullid:
3417 while n != bottom and n != self.nullid:
3417 p = self.changelog.parents(n)[0]
3418 p = self.changelog.parents(n)[0]
3418 if i == f:
3419 if i == f:
3419 l.append(n)
3420 l.append(n)
3420 f = f * 2
3421 f = f * 2
3421 n = p
3422 n = p
3422 i += 1
3423 i += 1
3423
3424
3424 r.append(l)
3425 r.append(l)
3425
3426
3426 return r
3427 return r
3427
3428
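# [Editorial sketch, not from localrepo.py] between() walks the first-parent
# chain from each 'top' towards 'bottom' and records only the ancestors at
# exponentially growing distances, giving a compact sample of the path rather
# than every node on it:
#
#     distance from top:  1  2  3  4  5  6  7  8 ...
#     recorded?           *  *     *           *      (when i == f, then f *= 2)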
3428 def checkpush(self, pushop):
3429 def checkpush(self, pushop):
3429 """Extensions can override this function if additional checks have
3430 """Extensions can override this function if additional checks have
3430 to be performed before pushing, or call it if they override push
3431 to be performed before pushing, or call it if they override push
3431 command.
3432 command.
3432 """
3433 """
3433
3434
3434 @unfilteredpropertycache
3435 @unfilteredpropertycache
3435 def prepushoutgoinghooks(self):
3436 def prepushoutgoinghooks(self):
3436 """Return util.hooks consists of a pushop with repo, remote, outgoing
3437 """Return util.hooks consists of a pushop with repo, remote, outgoing
3437 methods, which are called before pushing changesets.
3438 methods, which are called before pushing changesets.
3438 """
3439 """
3439 return util.hooks()
3440 return util.hooks()
3440
3441
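As a hedged example of how extensions typically use this hook point (the extension name and callback are hypothetical), a callable registered on the returned ``util.hooks`` object is invoked with the pushop before changesets are pushed:

    def _checkoutgoing(pushop):
        # pushop carries repo, remote and outgoing, per the docstring above
        if pushop.outgoing.missing:
            pushop.repo.ui.note(b'about to push %d changesets\n'
                                % len(pushop.outgoing.missing))

    def reposetup(ui, repo):
        repo.prepushoutgoinghooks.add(b'myext', _checkoutgoing)
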
3441 def pushkey(self, namespace, key, old, new):
3442 def pushkey(self, namespace, key, old, new):
3442 try:
3443 try:
3443 tr = self.currenttransaction()
3444 tr = self.currenttransaction()
3444 hookargs = {}
3445 hookargs = {}
3445 if tr is not None:
3446 if tr is not None:
3446 hookargs.update(tr.hookargs)
3447 hookargs.update(tr.hookargs)
3447 hookargs = pycompat.strkwargs(hookargs)
3448 hookargs = pycompat.strkwargs(hookargs)
3448 hookargs['namespace'] = namespace
3449 hookargs['namespace'] = namespace
3449 hookargs['key'] = key
3450 hookargs['key'] = key
3450 hookargs['old'] = old
3451 hookargs['old'] = old
3451 hookargs['new'] = new
3452 hookargs['new'] = new
3452 self.hook(b'prepushkey', throw=True, **hookargs)
3453 self.hook(b'prepushkey', throw=True, **hookargs)
3453 except error.HookAbort as exc:
3454 except error.HookAbort as exc:
3454 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3455 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3455 if exc.hint:
3456 if exc.hint:
3456 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3457 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3457 return False
3458 return False
3458 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3459 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3459 ret = pushkey.push(self, namespace, key, old, new)
3460 ret = pushkey.push(self, namespace, key, old, new)
3460
3461
3461 def runhook(unused_success):
3462 def runhook(unused_success):
3462 self.hook(
3463 self.hook(
3463 b'pushkey',
3464 b'pushkey',
3464 namespace=namespace,
3465 namespace=namespace,
3465 key=key,
3466 key=key,
3466 old=old,
3467 old=old,
3467 new=new,
3468 new=new,
3468 ret=ret,
3469 ret=ret,
3469 )
3470 )
3470
3471
3471 self._afterlock(runhook)
3472 self._afterlock(runhook)
3472 return ret
3473 return ret
3473
3474
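For illustration, a hedged sketch of an in-process ``prepushkey`` hook that consumes the keyword arguments assembled above (module and hook names are hypothetical; for Python hooks a truthy return value makes a ``pre*`` hook fail):

    # myhooks.py (hypothetical)
    def prepushkey(ui, repo, namespace=None, key=None, old=None, new=None, **kwargs):
        """Reject pushes of bookmarks whose names start with 'tmp/'."""
        if namespace == b'bookmarks' and key.startswith(b'tmp/'):
            ui.warn(b'temporary bookmarks may not be pushed\n')
            return True
        return False

It would be enabled with something like ``prepushkey.notmp = python:myhooks.prepushkey`` in the ``[hooks]`` section.
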
3474 def listkeys(self, namespace):
3475 def listkeys(self, namespace):
3475 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3476 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3476 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3477 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3477 values = pushkey.list(self, namespace)
3478 values = pushkey.list(self, namespace)
3478 self.hook(b'listkeys', namespace=namespace, values=values)
3479 self.hook(b'listkeys', namespace=namespace, values=values)
3479 return values
3480 return values
3480
3481
3481 def debugwireargs(self, one, two, three=None, four=None, five=None):
3482 def debugwireargs(self, one, two, three=None, four=None, five=None):
3482 '''used to test argument passing over the wire'''
3483 '''used to test argument passing over the wire'''
3483 return b"%s %s %s %s %s" % (
3484 return b"%s %s %s %s %s" % (
3484 one,
3485 one,
3485 two,
3486 two,
3486 pycompat.bytestr(three),
3487 pycompat.bytestr(three),
3487 pycompat.bytestr(four),
3488 pycompat.bytestr(four),
3488 pycompat.bytestr(five),
3489 pycompat.bytestr(five),
3489 )
3490 )
3490
3491
3491 def savecommitmessage(self, text):
3492 def savecommitmessage(self, text):
3492 fp = self.vfs(b'last-message.txt', b'wb')
3493 fp = self.vfs(b'last-message.txt', b'wb')
3493 try:
3494 try:
3494 fp.write(text)
3495 fp.write(text)
3495 finally:
3496 finally:
3496 fp.close()
3497 fp.close()
3497 return self.pathto(fp.name[len(self.root) + 1 :])
3498 return self.pathto(fp.name[len(self.root) + 1 :])
3498
3499
3499 def register_wanted_sidedata(self, category):
3500 def register_wanted_sidedata(self, category):
3500 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3501 if repository.REPO_FEATURE_SIDE_DATA not in self.features:
3501 # Only revlogv2 repos can want sidedata.
3502 # Only revlogv2 repos can want sidedata.
3502 return
3503 return
3503 self._wanted_sidedata.add(pycompat.bytestr(category))
3504 self._wanted_sidedata.add(pycompat.bytestr(category))
3504
3505
3505 def register_sidedata_computer(
3506 def register_sidedata_computer(
3506 self, kind, category, keys, computer, flags, replace=False
3507 self, kind, category, keys, computer, flags, replace=False
3507 ):
3508 ):
3508 if kind not in revlogconst.ALL_KINDS:
3509 if kind not in revlogconst.ALL_KINDS:
3509 msg = _(b"unexpected revlog kind '%s'.")
3510 msg = _(b"unexpected revlog kind '%s'.")
3510 raise error.ProgrammingError(msg % kind)
3511 raise error.ProgrammingError(msg % kind)
3511 category = pycompat.bytestr(category)
3512 category = pycompat.bytestr(category)
3512 already_registered = category in self._sidedata_computers.get(kind, [])
3513 already_registered = category in self._sidedata_computers.get(kind, [])
3513 if already_registered and not replace:
3514 if already_registered and not replace:
3514 msg = _(
3515 msg = _(
3515 b"cannot register a sidedata computer twice for category '%s'."
3516 b"cannot register a sidedata computer twice for category '%s'."
3516 )
3517 )
3517 raise error.ProgrammingError(msg % category)
3518 raise error.ProgrammingError(msg % category)
3518 if replace and not already_registered:
3519 if replace and not already_registered:
3519 msg = _(
3520 msg = _(
3520 b"cannot replace a sidedata computer that isn't registered "
3521 b"cannot replace a sidedata computer that isn't registered "
3521 b"for category '%s'."
3522 b"for category '%s'."
3522 )
3523 )
3523 raise error.ProgrammingError(msg % category)
3524 raise error.ProgrammingError(msg % category)
3524 self._sidedata_computers.setdefault(kind, {})
3525 self._sidedata_computers.setdefault(kind, {})
3525 self._sidedata_computers[kind][category] = (keys, computer, flags)
3526 self._sidedata_computers[kind][category] = (keys, computer, flags)
3526
3527
3527
3528
3528 # used to avoid circular references so destructors work
3529 # used to avoid circular references so destructors work
3529 def aftertrans(files):
3530 def aftertrans(files):
3530 renamefiles = [tuple(t) for t in files]
3531 renamefiles = [tuple(t) for t in files]
3531
3532
3532 def a():
3533 def a():
3533 for vfs, src, dest in renamefiles:
3534 for vfs, src, dest in renamefiles:
3534 # if src and dest refer to the same file, vfs.rename is a no-op,
3535 # if src and dest refer to the same file, vfs.rename is a no-op,
3535 # leaving both src and dest on disk. delete dest to make sure
3536 # leaving both src and dest on disk. delete dest to make sure
3536 # the rename couldn't be such a no-op.
3537 # the rename couldn't be such a no-op.
3537 vfs.tryunlink(dest)
3538 vfs.tryunlink(dest)
3538 try:
3539 try:
3539 vfs.rename(src, dest)
3540 vfs.rename(src, dest)
3540 except FileNotFoundError: # journal file does not yet exist
3541 except FileNotFoundError: # journal file does not yet exist
3541 pass
3542 pass
3542
3543
3543 return a
3544 return a
3544
3545
3545
3546
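The callable returned by ``aftertrans()`` is what renames journal files to their ``undo`` counterparts once the transaction completes. A hedged, self-contained sketch of the same unlink-then-rename pattern using plain ``os`` calls (paths hypothetical):

    import os

    def rename_journal(src, dest):
        # drop dest first so the rename cannot silently be a no-op when
        # src and dest already refer to the same file
        try:
            os.unlink(dest)
        except FileNotFoundError:
            pass
        try:
            os.rename(src, dest)
        except FileNotFoundError:  # journal file does not yet exist
            pass

    rename_journal(b'.hg/store/journal', b'.hg/store/undo')
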
3546 def undoname(fn: bytes) -> bytes:
3547 def undoname(fn: bytes) -> bytes:
3547 base, name = os.path.split(fn)
3548 base, name = os.path.split(fn)
3548 assert name.startswith(b'journal')
3549 assert name.startswith(b'journal')
3549 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3550 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3550
3551
3551
3552
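For instance (a doctest-style sketch on a POSIX path layout, not part of the source), journal file names map onto their undo counterparts like this:

    >>> undoname(b'.hg/store/journal')
    b'.hg/store/undo'
    >>> undoname(b'.hg/store/journal.phaseroots')
    b'.hg/store/undo.phaseroots'
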
3552 def instance(ui, path: bytes, create, intents=None, createopts=None):
3553 def instance(ui, path: bytes, create, intents=None, createopts=None):
3553 # prevent cyclic import localrepo -> upgrade -> localrepo
3554 # prevent cyclic import localrepo -> upgrade -> localrepo
3554 from . import upgrade
3555 from . import upgrade
3555
3556
3556 localpath = urlutil.urllocalpath(path)
3557 localpath = urlutil.urllocalpath(path)
3557 if create:
3558 if create:
3558 createrepository(ui, localpath, createopts=createopts)
3559 createrepository(ui, localpath, createopts=createopts)
3559
3560
3560 def repo_maker():
3561 def repo_maker():
3561 return makelocalrepository(ui, localpath, intents=intents)
3562 return makelocalrepository(ui, localpath, intents=intents)
3562
3563
3563 repo = repo_maker()
3564 repo = repo_maker()
3564 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3565 repo = upgrade.may_auto_upgrade(repo, repo_maker)
3565 return repo
3566 return repo
3566
3567
3567
3568
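A hedged usage sketch of this module-level entry point (the path is hypothetical); with ``create=True`` the repository layout is written first, and the returned repo may already have been transparently re-opened by the automatic-upgrade step:

    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    repo = localrepo.instance(ui, b'/srv/repos/example', create=True)
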
3568 def islocal(path: bytes) -> bool:
3569 def islocal(path: bytes) -> bool:
3569 return True
3570 return True
3570
3571
3571
3572
3572 def defaultcreateopts(ui, createopts=None):
3573 def defaultcreateopts(ui, createopts=None):
3573 """Populate the default creation options for a repository.
3574 """Populate the default creation options for a repository.
3574
3575
3575 A dictionary of explicitly requested creation options can be passed
3576 A dictionary of explicitly requested creation options can be passed
3576 in. Missing keys will be populated.
3577 in. Missing keys will be populated.
3577 """
3578 """
3578 createopts = dict(createopts or {})
3579 createopts = dict(createopts or {})
3579
3580
3580 if b'backend' not in createopts:
3581 if b'backend' not in createopts:
3581 # experimental config: storage.new-repo-backend
3582 # experimental config: storage.new-repo-backend
3582 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3583 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3583
3584
3584 return createopts
3585 return createopts
3585
3586
3586
3587
3587 def clone_requirements(ui, createopts, srcrepo):
3588 def clone_requirements(ui, createopts, srcrepo):
3588 """clone the requirements of a local repo for a local clone
3589 """clone the requirements of a local repo for a local clone
3589
3590
3590 The store requirements are unchanged while the working copy requirements
3591 The store requirements are unchanged while the working copy requirements
3591 depend on the configuration.
3592 depend on the configuration.
3592 """
3593 """
3593 target_requirements = set()
3594 target_requirements = set()
3594 if not srcrepo.requirements:
3595 if not srcrepo.requirements:
3595 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3596 # this is a legacy revlog "v0" repository, we cannot do anything fancy
3596 # with it.
3597 # with it.
3597 return target_requirements
3598 return target_requirements
3598 createopts = defaultcreateopts(ui, createopts=createopts)
3599 createopts = defaultcreateopts(ui, createopts=createopts)
3599 for r in newreporequirements(ui, createopts):
3600 for r in newreporequirements(ui, createopts):
3600 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3601 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
3601 target_requirements.add(r)
3602 target_requirements.add(r)
3602
3603
3603 for r in srcrepo.requirements:
3604 for r in srcrepo.requirements:
3604 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3605 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS:
3605 target_requirements.add(r)
3606 target_requirements.add(r)
3606 return target_requirements
3607 return target_requirements
3607
3608
3608
3609
3609 def newreporequirements(ui, createopts):
3610 def newreporequirements(ui, createopts):
3610 """Determine the set of requirements for a new local repository.
3611 """Determine the set of requirements for a new local repository.
3611
3612
3612 Extensions can wrap this function to specify custom requirements for
3613 Extensions can wrap this function to specify custom requirements for
3613 new repositories.
3614 new repositories.
3614 """
3615 """
3615
3616
3616 if b'backend' not in createopts:
3617 if b'backend' not in createopts:
3617 raise error.ProgrammingError(
3618 raise error.ProgrammingError(
3618 b'backend key not present in createopts; '
3619 b'backend key not present in createopts; '
3619 b'was defaultcreateopts() called?'
3620 b'was defaultcreateopts() called?'
3620 )
3621 )
3621
3622
3622 if createopts[b'backend'] != b'revlogv1':
3623 if createopts[b'backend'] != b'revlogv1':
3623 raise error.Abort(
3624 raise error.Abort(
3624 _(
3625 _(
3625 b'unable to determine repository requirements for '
3626 b'unable to determine repository requirements for '
3626 b'storage backend: %s'
3627 b'storage backend: %s'
3627 )
3628 )
3628 % createopts[b'backend']
3629 % createopts[b'backend']
3629 )
3630 )
3630
3631
3631 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3632 requirements = {requirementsmod.REVLOGV1_REQUIREMENT}
3632 if ui.configbool(b'format', b'usestore'):
3633 if ui.configbool(b'format', b'usestore'):
3633 requirements.add(requirementsmod.STORE_REQUIREMENT)
3634 requirements.add(requirementsmod.STORE_REQUIREMENT)
3634 if ui.configbool(b'format', b'usefncache'):
3635 if ui.configbool(b'format', b'usefncache'):
3635 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3636 requirements.add(requirementsmod.FNCACHE_REQUIREMENT)
3636 if ui.configbool(b'format', b'dotencode'):
3637 if ui.configbool(b'format', b'dotencode'):
3637 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3638 requirements.add(requirementsmod.DOTENCODE_REQUIREMENT)
3638
3639
3639 compengines = ui.configlist(b'format', b'revlog-compression')
3640 compengines = ui.configlist(b'format', b'revlog-compression')
3640 for compengine in compengines:
3641 for compengine in compengines:
3641 if compengine in util.compengines:
3642 if compengine in util.compengines:
3642 engine = util.compengines[compengine]
3643 engine = util.compengines[compengine]
3643 if engine.available() and engine.revlogheader():
3644 if engine.available() and engine.revlogheader():
3644 break
3645 break
3645 else:
3646 else:
3646 raise error.Abort(
3647 raise error.Abort(
3647 _(
3648 _(
3648 b'compression engines %s defined by '
3649 b'compression engines %s defined by '
3649 b'format.revlog-compression not available'
3650 b'format.revlog-compression not available'
3650 )
3651 )
3651 % b', '.join(b'"%s"' % e for e in compengines),
3652 % b', '.join(b'"%s"' % e for e in compengines),
3652 hint=_(
3653 hint=_(
3653 b'run "hg debuginstall" to list available '
3654 b'run "hg debuginstall" to list available '
3654 b'compression engines'
3655 b'compression engines'
3655 ),
3656 ),
3656 )
3657 )
3657
3658
3658 # zlib is the historical default and doesn't need an explicit requirement.
3659 # zlib is the historical default and doesn't need an explicit requirement.
3659 if compengine == b'zstd':
3660 if compengine == b'zstd':
3660 requirements.add(b'revlog-compression-zstd')
3661 requirements.add(b'revlog-compression-zstd')
3661 elif compengine != b'zlib':
3662 elif compengine != b'zlib':
3662 requirements.add(b'exp-compression-%s' % compengine)
3663 requirements.add(b'exp-compression-%s' % compengine)
3663
3664
3664 if scmutil.gdinitconfig(ui):
3665 if scmutil.gdinitconfig(ui):
3665 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3666 requirements.add(requirementsmod.GENERALDELTA_REQUIREMENT)
3666 if ui.configbool(b'format', b'sparse-revlog'):
3667 if ui.configbool(b'format', b'sparse-revlog'):
3667 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3668 requirements.add(requirementsmod.SPARSEREVLOG_REQUIREMENT)
3668
3669
3669 # experimental config: format.use-dirstate-v2
3670 # experimental config: format.use-dirstate-v2
3670 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3671 # Keep this logic in sync with `has_dirstate_v2()` in `tests/hghave.py`
3671 if ui.configbool(b'format', b'use-dirstate-v2'):
3672 if ui.configbool(b'format', b'use-dirstate-v2'):
3672 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3673 requirements.add(requirementsmod.DIRSTATE_V2_REQUIREMENT)
3673
3674
3674 # experimental config: format.exp-use-copies-side-data-changeset
3675 # experimental config: format.exp-use-copies-side-data-changeset
3675 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3676 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3676 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3677 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3677 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3678 requirements.add(requirementsmod.COPIESSDC_REQUIREMENT)
3678 if ui.configbool(b'experimental', b'treemanifest'):
3679 if ui.configbool(b'experimental', b'treemanifest'):
3679 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3680 requirements.add(requirementsmod.TREEMANIFEST_REQUIREMENT)
3680
3681
3681 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3682 changelogv2 = ui.config(b'format', b'exp-use-changelog-v2')
3682 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3683 if changelogv2 == b'enable-unstable-format-and-corrupt-my-data':
3683 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3684 requirements.add(requirementsmod.CHANGELOGV2_REQUIREMENT)
3684
3685
3685 revlogv2 = ui.config(b'experimental', b'revlogv2')
3686 revlogv2 = ui.config(b'experimental', b'revlogv2')
3686 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3687 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3687 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3688 requirements.discard(requirementsmod.REVLOGV1_REQUIREMENT)
3688 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3689 requirements.add(requirementsmod.REVLOGV2_REQUIREMENT)
3689 # experimental config: format.internal-phase
3690 # experimental config: format.internal-phase
3690 if ui.configbool(b'format', b'use-internal-phase'):
3691 if ui.configbool(b'format', b'use-internal-phase'):
3691 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3692 requirements.add(requirementsmod.INTERNAL_PHASE_REQUIREMENT)
3692
3693
3693 # experimental config: format.exp-archived-phase
3694 # experimental config: format.exp-archived-phase
3694 if ui.configbool(b'format', b'exp-archived-phase'):
3695 if ui.configbool(b'format', b'exp-archived-phase'):
3695 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3696 requirements.add(requirementsmod.ARCHIVED_PHASE_REQUIREMENT)
3696
3697
3697 if createopts.get(b'narrowfiles'):
3698 if createopts.get(b'narrowfiles'):
3698 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3699 requirements.add(requirementsmod.NARROW_REQUIREMENT)
3699
3700
3700 if createopts.get(b'lfs'):
3701 if createopts.get(b'lfs'):
3701 requirements.add(b'lfs')
3702 requirements.add(b'lfs')
3702
3703
3703 if ui.configbool(b'format', b'bookmarks-in-store'):
3704 if ui.configbool(b'format', b'bookmarks-in-store'):
3704 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3705 requirements.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3705
3706
3706 if ui.configbool(b'format', b'use-persistent-nodemap'):
3707 if ui.configbool(b'format', b'use-persistent-nodemap'):
3707 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3708 requirements.add(requirementsmod.NODEMAP_REQUIREMENT)
3708
3709
3709 # if share-safe is enabled, let's create the new repository with the new
3710 # if share-safe is enabled, let's create the new repository with the new
3710 # requirement
3711 # requirement
3711 if ui.configbool(b'format', b'use-share-safe'):
3712 if ui.configbool(b'format', b'use-share-safe'):
3712 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3713 requirements.add(requirementsmod.SHARESAFE_REQUIREMENT)
3713
3714
3714 # if we are creating a share-repo¹ we have to handle requirements
3715 # if we are creating a share-repo¹ we have to handle requirements
3715 # differently.
3716 # differently.
3716 #
3717 #
3717 # [1] (i.e. reusing the store from another repository, just having a
3718 # [1] (i.e. reusing the store from another repository, just having a
3718 # working copy)
3719 # working copy)
3719 if b'sharedrepo' in createopts:
3720 if b'sharedrepo' in createopts:
3720 source_requirements = set(createopts[b'sharedrepo'].requirements)
3721 source_requirements = set(createopts[b'sharedrepo'].requirements)
3721
3722
3722 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3723 if requirementsmod.SHARESAFE_REQUIREMENT not in source_requirements:
3723 # share to an old school repository, we have to copy the
3724 # share to an old school repository, we have to copy the
3724 # requirements and hope for the best.
3725 # requirements and hope for the best.
3725 requirements = source_requirements
3726 requirements = source_requirements
3726 else:
3727 else:
3727 # We have control on the working copy only, so "copy" the non
3728 # We have control on the working copy only, so "copy" the non
3728 # working copy part over, ignoring previous logic.
3729 # working copy part over, ignoring previous logic.
3729 to_drop = set()
3730 to_drop = set()
3730 for req in requirements:
3731 for req in requirements:
3731 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3732 if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
3732 continue
3733 continue
3733 if req in source_requirements:
3734 if req in source_requirements:
3734 continue
3735 continue
3735 to_drop.add(req)
3736 to_drop.add(req)
3736 requirements -= to_drop
3737 requirements -= to_drop
3737 requirements |= source_requirements
3738 requirements |= source_requirements
3738
3739
3739 if createopts.get(b'sharedrelative'):
3740 if createopts.get(b'sharedrelative'):
3740 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3741 requirements.add(requirementsmod.RELATIVE_SHARED_REQUIREMENT)
3741 else:
3742 else:
3742 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3743 requirements.add(requirementsmod.SHARED_REQUIREMENT)
3743
3744
3744 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3745 if ui.configbool(b'format', b'use-dirstate-tracked-hint'):
3745 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3746 version = ui.configint(b'format', b'use-dirstate-tracked-hint.version')
3746 msg = _(b"ignoring unknown tracked key version: %d\n")
3747 msg = _(b"ignoring unknown tracked key version: %d\n")
3747 hint = _(
3748 hint = _(
3748 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3749 b"see `hg help config.format.use-dirstate-tracked-hint-version"
3749 )
3750 )
3750 if version != 1:
3751 if version != 1:
3751 ui.warn(msg % version, hint=hint)
3752 ui.warn(msg % version, hint=hint)
3752 else:
3753 else:
3753 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3754 requirements.add(requirementsmod.DIRSTATE_TRACKED_HINT_V1)
3754
3755
3755 return requirements
3756 return requirements
3756
3757
3757
3758
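A hedged illustration (not part of the module) of how configuration feeds into the requirement set; it reuses ``defaultcreateopts()`` exactly as ``clone_requirements()`` does above, and the printed entries depend on the active configuration:

    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    ui.setconfig(b'format', b'use-share-safe', b'yes', b'example')
    ui.setconfig(b'format', b'use-persistent-nodemap', b'yes', b'example')
    opts = localrepo.defaultcreateopts(ui)
    for req in sorted(localrepo.newreporequirements(ui, opts)):
        print(req.decode('ascii'))
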
3758 def checkrequirementscompat(ui, requirements):
3759 def checkrequirementscompat(ui, requirements):
3759 """Checks compatibility of repository requirements enabled and disabled.
3760 """Checks compatibility of repository requirements enabled and disabled.
3760
3761
3761 Returns a set of requirements which need to be dropped because dependent
3762 Returns a set of requirements which need to be dropped because dependent
3762 requirements are not enabled. Also warns users about it."""
3763 requirements are not enabled. Also warns users about it."""
3763
3764
3764 dropped = set()
3765 dropped = set()
3765
3766
3766 if requirementsmod.STORE_REQUIREMENT not in requirements:
3767 if requirementsmod.STORE_REQUIREMENT not in requirements:
3767 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3768 if requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
3768 ui.warn(
3769 ui.warn(
3769 _(
3770 _(
3770 b'ignoring enabled \'format.bookmarks-in-store\' config '
3771 b'ignoring enabled \'format.bookmarks-in-store\' config '
3771 b'because it is incompatible with disabled '
3772 b'because it is incompatible with disabled '
3772 b'\'format.usestore\' config\n'
3773 b'\'format.usestore\' config\n'
3773 )
3774 )
3774 )
3775 )
3775 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3776 dropped.add(requirementsmod.BOOKMARKS_IN_STORE_REQUIREMENT)
3776
3777
3777 if (
3778 if (
3778 requirementsmod.SHARED_REQUIREMENT in requirements
3779 requirementsmod.SHARED_REQUIREMENT in requirements
3779 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3780 or requirementsmod.RELATIVE_SHARED_REQUIREMENT in requirements
3780 ):
3781 ):
3781 raise error.Abort(
3782 raise error.Abort(
3782 _(
3783 _(
3783 b"cannot create shared repository as source was created"
3784 b"cannot create shared repository as source was created"
3784 b" with 'format.usestore' config disabled"
3785 b" with 'format.usestore' config disabled"
3785 )
3786 )
3786 )
3787 )
3787
3788
3788 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3789 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
3789 if ui.hasconfig(b'format', b'use-share-safe'):
3790 if ui.hasconfig(b'format', b'use-share-safe'):
3790 msg = _(
3791 msg = _(
3791 b"ignoring enabled 'format.use-share-safe' config because "
3792 b"ignoring enabled 'format.use-share-safe' config because "
3792 b"it is incompatible with disabled 'format.usestore'"
3793 b"it is incompatible with disabled 'format.usestore'"
3793 b" config\n"
3794 b" config\n"
3794 )
3795 )
3795 ui.warn(msg)
3796 ui.warn(msg)
3796 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3797 dropped.add(requirementsmod.SHARESAFE_REQUIREMENT)
3797
3798
3798 return dropped
3799 return dropped
3799
3800
3800
3801
3801 def filterknowncreateopts(ui, createopts):
3802 def filterknowncreateopts(ui, createopts):
3802 """Filters a dict of repo creation options against options that are known.
3803 """Filters a dict of repo creation options against options that are known.
3803
3804
3804 Receives a dict of repo creation options and returns a dict of those
3805 Receives a dict of repo creation options and returns a dict of those
3805 options that we don't know how to handle.
3806 options that we don't know how to handle.
3806
3807
3807 This function is called as part of repository creation. If the
3808 This function is called as part of repository creation. If the
3808 returned dict contains any items, repository creation will not
3809 returned dict contains any items, repository creation will not
3809 be allowed, as it means there was a request to create a repository
3810 be allowed, as it means there was a request to create a repository
3810 with options not recognized by loaded code.
3811 with options not recognized by loaded code.
3811
3812
3812 Extensions can wrap this function to filter out creation options
3813 Extensions can wrap this function to filter out creation options
3813 they know how to handle.
3814 they know how to handle.
3814 """
3815 """
3815 known = {
3816 known = {
3816 b'backend',
3817 b'backend',
3817 b'lfs',
3818 b'lfs',
3818 b'narrowfiles',
3819 b'narrowfiles',
3819 b'sharedrepo',
3820 b'sharedrepo',
3820 b'sharedrelative',
3821 b'sharedrelative',
3821 b'shareditems',
3822 b'shareditems',
3822 b'shallowfilestore',
3823 b'shallowfilestore',
3823 }
3824 }
3824
3825
3825 return {k: v for k, v in createopts.items() if k not in known}
3826 return {k: v for k, v in createopts.items() if k not in known}
3826
3827
3827
3828
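As the docstring notes, an extension can wrap this function to claim creation options it understands. A hedged sketch of such a wrapper (the extension and its ``myfeature`` option are hypothetical):

    from mercurial import extensions, localrepo

    def _filtercreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        # this extension knows how to handle the 'myfeature' option
        unknown.pop(b'myfeature', None)
        return unknown

    def uisetup(ui):
        extensions.wrapfunction(
            localrepo, 'filterknowncreateopts', _filtercreateopts
        )
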
3828 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3829 def createrepository(ui, path: bytes, createopts=None, requirements=None):
3829 """Create a new repository in a vfs.
3830 """Create a new repository in a vfs.
3830
3831
3831 ``path`` path to the new repo's working directory.
3832 ``path`` path to the new repo's working directory.
3832 ``createopts`` options for the new repository.
3833 ``createopts`` options for the new repository.
3833 ``requirements`` predefined set of requirements.
3834 ``requirements`` predefined set of requirements.
3834 (incompatible with ``createopts``)
3835 (incompatible with ``createopts``)
3835
3836
3836 The following keys for ``createopts`` are recognized:
3837 The following keys for ``createopts`` are recognized:
3837
3838
3838 backend
3839 backend
3839 The storage backend to use.
3840 The storage backend to use.
3840 lfs
3841 lfs
3841 Repository will be created with ``lfs`` requirement. The lfs extension
3842 Repository will be created with ``lfs`` requirement. The lfs extension
3842 will automatically be loaded when the repository is accessed.
3843 will automatically be loaded when the repository is accessed.
3843 narrowfiles
3844 narrowfiles
3844 Set up repository to support narrow file storage.
3845 Set up repository to support narrow file storage.
3845 sharedrepo
3846 sharedrepo
3846 Repository object from which storage should be shared.
3847 Repository object from which storage should be shared.
3847 sharedrelative
3848 sharedrelative
3848 Boolean indicating if the path to the shared repo should be
3849 Boolean indicating if the path to the shared repo should be
3849 stored as relative. By default, the pointer to the "parent" repo
3850 stored as relative. By default, the pointer to the "parent" repo
3850 is stored as an absolute path.
3851 is stored as an absolute path.
3851 shareditems
3852 shareditems
3852 Set of items to share to the new repository (in addition to storage).
3853 Set of items to share to the new repository (in addition to storage).
3853 shallowfilestore
3854 shallowfilestore
3854 Indicates that storage for files should be shallow (not all ancestor
3855 Indicates that storage for files should be shallow (not all ancestor
3855 revisions are known).
3856 revisions are known).
3856 """
3857 """
3857
3858
3858 if requirements is not None:
3859 if requirements is not None:
3859 if createopts is not None:
3860 if createopts is not None:
3860 msg = b'cannot specify both createopts and requirements'
3861 msg = b'cannot specify both createopts and requirements'
3861 raise error.ProgrammingError(msg)
3862 raise error.ProgrammingError(msg)
3862 createopts = {}
3863 createopts = {}
3863 else:
3864 else:
3864 createopts = defaultcreateopts(ui, createopts=createopts)
3865 createopts = defaultcreateopts(ui, createopts=createopts)
3865
3866
3866 unknownopts = filterknowncreateopts(ui, createopts)
3867 unknownopts = filterknowncreateopts(ui, createopts)
3867
3868
3868 if not isinstance(unknownopts, dict):
3869 if not isinstance(unknownopts, dict):
3869 raise error.ProgrammingError(
3870 raise error.ProgrammingError(
3870 b'filterknowncreateopts() did not return a dict'
3871 b'filterknowncreateopts() did not return a dict'
3871 )
3872 )
3872
3873
3873 if unknownopts:
3874 if unknownopts:
3874 raise error.Abort(
3875 raise error.Abort(
3875 _(
3876 _(
3876 b'unable to create repository because of unknown '
3877 b'unable to create repository because of unknown '
3877 b'creation option: %s'
3878 b'creation option: %s'
3878 )
3879 )
3879 % b', '.join(sorted(unknownopts)),
3880 % b', '.join(sorted(unknownopts)),
3880 hint=_(b'is a required extension not loaded?'),
3881 hint=_(b'is a required extension not loaded?'),
3881 )
3882 )
3882
3883
3883 requirements = newreporequirements(ui, createopts=createopts)
3884 requirements = newreporequirements(ui, createopts=createopts)
3884 requirements -= checkrequirementscompat(ui, requirements)
3885 requirements -= checkrequirementscompat(ui, requirements)
3885
3886
3886 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3887 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3887
3888
3888 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3889 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3889 if hgvfs.exists():
3890 if hgvfs.exists():
3890 raise error.RepoError(_(b'repository %s already exists') % path)
3891 raise error.RepoError(_(b'repository %s already exists') % path)
3891
3892
3892 if b'sharedrepo' in createopts:
3893 if b'sharedrepo' in createopts:
3893 sharedpath = createopts[b'sharedrepo'].sharedpath
3894 sharedpath = createopts[b'sharedrepo'].sharedpath
3894
3895
3895 if createopts.get(b'sharedrelative'):
3896 if createopts.get(b'sharedrelative'):
3896 try:
3897 try:
3897 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3898 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3898 sharedpath = util.pconvert(sharedpath)
3899 sharedpath = util.pconvert(sharedpath)
3899 except (IOError, ValueError) as e:
3900 except (IOError, ValueError) as e:
3900 # ValueError is raised on Windows if the drive letters differ
3901 # ValueError is raised on Windows if the drive letters differ
3901 # on each path.
3902 # on each path.
3902 raise error.Abort(
3903 raise error.Abort(
3903 _(b'cannot calculate relative path'),
3904 _(b'cannot calculate relative path'),
3904 hint=stringutil.forcebytestr(e),
3905 hint=stringutil.forcebytestr(e),
3905 )
3906 )
3906
3907
3907 if not wdirvfs.exists():
3908 if not wdirvfs.exists():
3908 wdirvfs.makedirs()
3909 wdirvfs.makedirs()
3909
3910
3910 hgvfs.makedir(notindexed=True)
3911 hgvfs.makedir(notindexed=True)
3911 if b'sharedrepo' not in createopts:
3912 if b'sharedrepo' not in createopts:
3912 hgvfs.mkdir(b'cache')
3913 hgvfs.mkdir(b'cache')
3913 hgvfs.mkdir(b'wcache')
3914 hgvfs.mkdir(b'wcache')
3914
3915
3915 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3916 has_store = requirementsmod.STORE_REQUIREMENT in requirements
3916 if has_store and b'sharedrepo' not in createopts:
3917 if has_store and b'sharedrepo' not in createopts:
3917 hgvfs.mkdir(b'store')
3918 hgvfs.mkdir(b'store')
3918
3919
3919 # We create an invalid changelog outside the store so very old
3920 # We create an invalid changelog outside the store so very old
3920 # Mercurial versions (which didn't know about the requirements
3921 # Mercurial versions (which didn't know about the requirements
3921 # file) encounter an error on reading the changelog. This
3922 # file) encounter an error on reading the changelog. This
3922 # effectively locks out old clients and prevents them from
3923 # effectively locks out old clients and prevents them from
3923 # mucking with a repo in an unknown format.
3924 # mucking with a repo in an unknown format.
3924 #
3925 #
3925 # The revlog header has version 65535, which won't be recognized by
3926 # The revlog header has version 65535, which won't be recognized by
3926 # such old clients.
3927 # such old clients.
3927 hgvfs.append(
3928 hgvfs.append(
3928 b'00changelog.i',
3929 b'00changelog.i',
3929 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3930 b'\0\0\xFF\xFF dummy changelog to prevent using the old repo '
3930 b'layout',
3931 b'layout',
3931 )
3932 )
3932
3933
3933 # Filter the requirements into working copy and store ones
3934 # Filter the requirements into working copy and store ones
3934 wcreq, storereq = scmutil.filterrequirements(requirements)
3935 wcreq, storereq = scmutil.filterrequirements(requirements)
3935 # write working copy ones
3936 # write working copy ones
3936 scmutil.writerequires(hgvfs, wcreq)
3937 scmutil.writerequires(hgvfs, wcreq)
3937 # If there are store requirements and the current repository
3938 # If there are store requirements and the current repository
3938 # is not a shared one, write stored requirements
3939 # is not a shared one, write stored requirements
3939 # For new shared repository, we don't need to write the store
3940 # For new shared repository, we don't need to write the store
3940 # requirements as they are already present in store requires
3941 # requirements as they are already present in store requires
3941 if storereq and b'sharedrepo' not in createopts:
3942 if storereq and b'sharedrepo' not in createopts:
3942 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3943 storevfs = vfsmod.vfs(hgvfs.join(b'store'), cacheaudited=True)
3943 scmutil.writerequires(storevfs, storereq)
3944 scmutil.writerequires(storevfs, storereq)
3944
3945
3945 # Write out file telling readers where to find the shared store.
3946 # Write out file telling readers where to find the shared store.
3946 if b'sharedrepo' in createopts:
3947 if b'sharedrepo' in createopts:
3947 hgvfs.write(b'sharedpath', sharedpath)
3948 hgvfs.write(b'sharedpath', sharedpath)
3948
3949
3949 if createopts.get(b'shareditems'):
3950 if createopts.get(b'shareditems'):
3950 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3951 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3951 hgvfs.write(b'shared', shared)
3952 hgvfs.write(b'shared', shared)
3952
3953
3953
3954
3954 def poisonrepository(repo):
3955 def poisonrepository(repo):
3955 """Poison a repository instance so it can no longer be used."""
3956 """Poison a repository instance so it can no longer be used."""
3956 # Perform any cleanup on the instance.
3957 # Perform any cleanup on the instance.
3957 repo.close()
3958 repo.close()
3958
3959
3959 # Our strategy is to replace the type of the object with one that
3960 # Our strategy is to replace the type of the object with one that
3960 # has all attribute lookups result in error.
3961 # has all attribute lookups result in error.
3961 #
3962 #
3962 # But we have to allow the close() method because some constructors
3963 # But we have to allow the close() method because some constructors
3963 # of repos call close() on repo references.
3964 # of repos call close() on repo references.
3964 class poisonedrepository:
3965 class poisonedrepository:
3965 def __getattribute__(self, item):
3966 def __getattribute__(self, item):
3966 if item == 'close':
3967 if item == 'close':
3967 return object.__getattribute__(self, item)
3968 return object.__getattribute__(self, item)
3968
3969
3969 raise error.ProgrammingError(
3970 raise error.ProgrammingError(
3970 b'repo instances should not be used after unshare'
3971 b'repo instances should not be used after unshare'
3971 )
3972 )
3972
3973
3973 def close(self):
3974 def close(self):
3974 pass
3975 pass
3975
3976
3976 # We may have a repoview, which intercepts __setattr__. So be sure
3977 # We may have a repoview, which intercepts __setattr__. So be sure
3977 # we operate at the lowest level possible.
3978 # we operate at the lowest level possible.
3978 object.__setattr__(repo, '__class__', poisonedrepository)
3979 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,466 +1,467 b''
1 setup repo
1 setup repo
2 $ hg init t
2 $ hg init t
3 $ cd t
3 $ cd t
4 $ echo a > a
4 $ echo a > a
5 $ hg add a
5 $ hg add a
6 $ hg commit -m 'add a'
6 $ hg commit -m 'add a'
7 $ hg verify -q
7 $ hg verify -q
8 $ hg parents
8 $ hg parents
9 changeset: 0:1f0dee641bb7
9 changeset: 0:1f0dee641bb7
10 tag: tip
10 tag: tip
11 user: test
11 user: test
12 date: Thu Jan 01 00:00:00 1970 +0000
12 date: Thu Jan 01 00:00:00 1970 +0000
13 summary: add a
13 summary: add a
14
14
15
15
16 rollback to null revision
16 rollback to null revision
17 $ hg status
17 $ hg status
18 $ hg rollback
18 $ hg rollback
19 repository tip rolled back to revision -1 (undo commit)
19 repository tip rolled back to revision -1 (undo commit)
20 working directory now based on revision -1
20 working directory now based on revision -1
21 $ hg verify -q
21 $ hg verify -q
22 $ hg parents
22 $ hg parents
23 $ hg status
23 $ hg status
24 A a
24 A a
25
25
26 Two changesets this time so we rollback to a real changeset
26 Two changesets this time so we rollback to a real changeset
27 $ hg commit -m'add a again'
27 $ hg commit -m'add a again'
28 $ echo a >> a
28 $ echo a >> a
29 $ hg commit -m'modify a'
29 $ hg commit -m'modify a'
30
30
31 Test issue 902 (current branch is preserved)
31 Test issue 902 (current branch is preserved)
32 $ hg branch test
32 $ hg branch test
33 marked working directory as branch test
33 marked working directory as branch test
34 (branches are permanent and global, did you want a bookmark?)
34 (branches are permanent and global, did you want a bookmark?)
35 $ hg rollback
35 $ hg rollback
36 repository tip rolled back to revision 0 (undo commit)
36 repository tip rolled back to revision 0 (undo commit)
37 working directory now based on revision 0
37 working directory now based on revision 0
38 $ hg branch
38 $ hg branch
39 default
39 default
40
40
41 Test issue 1635 (commit message saved)
41 Test issue 1635 (commit message saved)
42 $ cat .hg/last-message.txt ; echo
42 $ cat .hg/last-message.txt ; echo
43 modify a
43 modify a
44
44
45 Test rollback of hg before issue 902 was fixed
45 Test rollback of hg before issue 902 was fixed
46
46
47 $ hg commit -m "test3"
47 $ hg commit -m "test3"
48 $ hg branch test
48 $ hg branch test
49 marked working directory as branch test
49 marked working directory as branch test
50 (branches are permanent and global, did you want a bookmark?)
50 (branches are permanent and global, did you want a bookmark?)
51 $ rm .hg/undo.branch
51 $ rm .hg/undo.branch
52 $ hg rollback
52 $ hg rollback
53 repository tip rolled back to revision 0 (undo commit)
53 repository tip rolled back to revision 0 (undo commit)
54 named branch could not be reset: current branch is still 'test'
54 named branch could not be reset: current branch is still 'test'
55 working directory now based on revision 0
55 working directory now based on revision 0
56 $ hg branch
56 $ hg branch
57 test
57 test
58
58
59 working dir unaffected by rollback: do not restore dirstate et al.
59 working dir unaffected by rollback: do not restore dirstate et al.
60 $ hg log --template '{rev} {branch} {desc|firstline}\n'
60 $ hg log --template '{rev} {branch} {desc|firstline}\n'
61 0 default add a again
61 0 default add a again
62 $ hg status
62 $ hg status
63 M a
63 M a
64 $ hg bookmark foo
64 $ hg bookmark foo
65 $ hg commit -m'modify a again'
65 $ hg commit -m'modify a again'
66 $ echo b > b
66 $ echo b > b
67 $ hg bookmark bar -r default #making bar active, before the transaction
67 $ hg bookmark bar -r default #making bar active, before the transaction
68 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
68 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
69 @ 1 [test] (foo) modify a again
69 @ 1 [test] (foo) modify a again
70 |
70 |
71 o 0 [default] (bar) add a again
71 o 0 [default] (bar) add a again
72
72
73 $ hg add b
73 $ hg add b
74 $ hg commit -m'add b'
74 $ hg commit -m'add b'
75 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
75 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
76 @ 2 [test] (foo) add b
76 @ 2 [test] (foo) add b
77 |
77 |
78 o 1 [test] () modify a again
78 o 1 [test] () modify a again
79 |
79 |
80 o 0 [default] (bar) add a again
80 o 0 [default] (bar) add a again
81
81
82 $ hg update bar
82 $ hg update bar
83 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
83 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
84 (activating bookmark bar)
84 (activating bookmark bar)
85 $ cat .hg/undo.branch ; echo
85 $ cat .hg/undo.branch ; echo
86 test
86 test
87 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
87 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
88 o 2 [test] (foo) add b
88 o 2 [test] (foo) add b
89 |
89 |
90 o 1 [test] () modify a again
90 o 1 [test] () modify a again
91 |
91 |
92 @ 0 [default] (bar) add a again
92 @ 0 [default] (bar) add a again
93
93
94 $ hg rollback
94 $ hg rollback
95 abort: rollback of last commit while not checked out may lose data
95 abort: rollback of last commit while not checked out may lose data
96 (use -f to force)
96 (use -f to force)
97 [255]
97 [255]
98 $ hg rollback -f
98 $ hg rollback -f
99 repository tip rolled back to revision 1 (undo commit)
99 repository tip rolled back to revision 1 (undo commit)
100 $ hg id -n
100 $ hg id -n
101 0
101 0
102 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
102 $ hg log -G --template '{rev} [{branch}] ({bookmarks}) {desc|firstline}\n'
103 o 1 [test] (foo) modify a again
103 o 1 [test] (foo) modify a again
104 |
104 |
105 @ 0 [default] (bar) add a again
105 @ 0 [default] (bar) add a again
106
106
107 $ hg branch
107 $ hg branch
108 default
108 default
109 $ cat .hg/bookmarks.current ; echo
109 $ cat .hg/bookmarks.current ; echo
110 bar
110 bar
111 $ hg bookmark --delete foo bar
111 $ hg bookmark --delete foo bar
112
112
113 rollback by pretxncommit saves commit message (issue1635)
113 rollback by pretxncommit saves commit message (issue1635)
114
114
115 $ echo a >> a
115 $ echo a >> a
116 $ hg --config hooks.pretxncommit=false commit -m"precious commit message"
116 $ hg --config hooks.pretxncommit=false commit -m"precious commit message"
117 transaction abort!
117 transaction abort!
118 rollback completed
118 rollback completed
119 abort: pretxncommit hook exited with status * (glob)
119 abort: pretxncommit hook exited with status * (glob)
120 [40]
120 [40]
121 $ cat .hg/last-message.txt ; echo
121 $ cat .hg/last-message.txt ; echo
122 precious commit message
122 precious commit message
123
123
124 same thing, but run $EDITOR
124 same thing, but run $EDITOR
125
125
126 $ cat > editor.sh << '__EOF__'
126 $ cat > editor.sh << '__EOF__'
127 > echo "another precious commit message" > "$1"
127 > echo "another precious commit message" > "$1"
128 > __EOF__
128 > __EOF__
129 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg --config hooks.pretxncommit=false commit 2>&1
129 $ HGEDITOR="\"sh\" \"`pwd`/editor.sh\"" hg --config hooks.pretxncommit=false commit 2>&1
130 transaction abort!
130 transaction abort!
131 rollback completed
131 rollback completed
132 note: commit message saved in .hg/last-message.txt
132 note: commit message saved in .hg/last-message.txt
133 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
133 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
134 abort: pretxncommit hook exited with status * (glob)
134 abort: pretxncommit hook exited with status * (glob)
135 [40]
135 [40]
136 $ cat .hg/last-message.txt
136 $ cat .hg/last-message.txt
137 another precious commit message
137 another precious commit message
138
138
139 test rollback on served repository
139 test rollback on served repository
140
140
141 #if serve
141 #if serve
142 $ hg commit -m "precious commit message"
142 $ hg commit -m "precious commit message"
143 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
143 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
144 $ cat hg.pid >> $DAEMON_PIDS
144 $ cat hg.pid >> $DAEMON_PIDS
145 $ cd ..
145 $ cd ..
146 $ hg clone http://localhost:$HGPORT u
146 $ hg clone http://localhost:$HGPORT u
147 requesting all changes
147 requesting all changes
148 adding changesets
148 adding changesets
149 adding manifests
149 adding manifests
150 adding file changes
150 adding file changes
151 added 3 changesets with 2 changes to 1 files (+1 heads)
151 added 3 changesets with 2 changes to 1 files (+1 heads)
152 new changesets 23b0221f3370:068774709090
152 new changesets 23b0221f3370:068774709090
153 updating to branch default
153 updating to branch default
154 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
154 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
155 $ cd u
155 $ cd u
156 $ hg id default
156 $ hg id default
157 068774709090
157 068774709090
158
158
159 now rollback and observe that 'hg serve' reloads the repository and
159 now rollback and observe that 'hg serve' reloads the repository and
160 presents the correct tip changeset:
160 presents the correct tip changeset:
161
161
162 $ hg -R ../t rollback
162 $ hg -R ../t rollback
163 repository tip rolled back to revision 1 (undo commit)
163 repository tip rolled back to revision 1 (undo commit)
164 working directory now based on revision 0
164 working directory now based on revision 0
165 $ hg id default
165 $ hg id default
166 791dd2169706
166 791dd2169706
167
167
168 $ killdaemons.py
168 $ killdaemons.py
169 #endif
169 #endif
170
170
171 update to older changeset and then refuse rollback, because
171 update to older changeset and then refuse rollback, because
172 that would lose data (issue2998)
172 that would lose data (issue2998)
173 $ cd ../t
173 $ cd ../t
174 $ hg -q update
174 $ hg -q update
175 $ rm `hg status -un`
175 $ rm `hg status -un`
176 $ template='{rev}:{node|short} [{branch}] {desc|firstline}\n'
176 $ template='{rev}:{node|short} [{branch}] {desc|firstline}\n'
177 $ echo 'valuable new file' > b
177 $ echo 'valuable new file' > b
178 $ echo 'valuable modification' >> a
178 $ echo 'valuable modification' >> a
179 $ hg commit -A -m'a valuable change'
179 $ hg commit -A -m'a valuable change'
180 adding b
180 adding b
181 $ hg update 0
181 $ hg update 0
182 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
182 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
183 $ hg rollback
183 $ hg rollback
184 abort: rollback of last commit while not checked out may lose data
184 abort: rollback of last commit while not checked out may lose data
185 (use -f to force)
185 (use -f to force)
186 [255]
186 [255]
187 $ hg tip -q
187 $ hg tip -q
188 2:4d9cd3795eea
188 2:4d9cd3795eea
189 $ hg rollback -f
189 $ hg rollback -f
190 repository tip rolled back to revision 1 (undo commit)
190 repository tip rolled back to revision 1 (undo commit)
191 $ hg status
191 $ hg status
192 $ hg log --removed b # yep, it's gone
192 $ hg log --removed b # yep, it's gone
193
193
194 same again, but emulate an old client that doesn't write undo.desc
194 same again, but emulate an old client that doesn't write undo.desc
195 $ hg -q update
195 $ hg -q update
196 $ echo 'valuable modification redux' >> a
196 $ echo 'valuable modification redux' >> a
197 $ hg commit -m'a valuable change redux'
197 $ hg commit -m'a valuable change redux'
198 $ rm .hg/undo.desc
198 $ rm .hg/undo.desc
199 $ hg update 0
199 $ hg update 0
200 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
201 $ hg rollback
201 $ hg rollback
202 rolling back unknown transaction
202 rolling back unknown transaction
203 working directory now based on revision 0
203 $ cat a
204 $ cat a
204 a
205 a
205
206
206 corrupt journal test
207 corrupt journal test
207 $ echo "foo" > .hg/store/journal
208 $ echo "foo" > .hg/store/journal
208 $ hg recover --verify -q
209 $ hg recover --verify -q
209 couldn't read journal entry 'foo\n'!
210 couldn't read journal entry 'foo\n'!
210
211
211 rollback disabled by config
212 rollback disabled by config
212 $ cat >> $HGRCPATH <<EOF
213 $ cat >> $HGRCPATH <<EOF
213 > [ui]
214 > [ui]
214 > rollback = false
215 > rollback = false
215 > EOF
216 > EOF
216 $ echo narf >> pinky-sayings.txt
217 $ echo narf >> pinky-sayings.txt
217 $ hg add pinky-sayings.txt
218 $ hg add pinky-sayings.txt
218 $ hg ci -m 'First one.'
219 $ hg ci -m 'First one.'
219 $ hg rollback
220 $ hg rollback
220 abort: rollback is disabled because it is unsafe
221 abort: rollback is disabled because it is unsafe
221 (see `hg help -v rollback` for information)
222 (see `hg help -v rollback` for information)
222 [255]
223 [255]
223
224
224 $ cd ..
225 $ cd ..
225
226
226 I/O errors on stdio are handled properly (issue5658)
227 I/O errors on stdio are handled properly (issue5658)
227
228
228 $ cat > badui.py << EOF
229 $ cat > badui.py << EOF
229 > import errno
230 > import errno
230 > from mercurial.i18n import _
231 > from mercurial.i18n import _
231 > from mercurial import (
232 > from mercurial import (
232 > error,
233 > error,
233 > registrar,
234 > registrar,
234 > ui as uimod,
235 > ui as uimod,
235 > )
236 > )
236 >
237 >
237 > configtable = {}
238 > configtable = {}
238 > configitem = registrar.configitem(configtable)
239 > configitem = registrar.configitem(configtable)
239 >
240 >
240 > configitem(b'ui', b'ioerrors',
241 > configitem(b'ui', b'ioerrors',
241 > default=list,
242 > default=list,
242 > )
243 > )
243 >
244 >
244 > def pretxncommit(ui, repo, **kwargs):
245 > def pretxncommit(ui, repo, **kwargs):
245 > ui.warn(b'warn during pretxncommit\n')
246 > ui.warn(b'warn during pretxncommit\n')
246 >
247 >
247 > def pretxnclose(ui, repo, **kwargs):
248 > def pretxnclose(ui, repo, **kwargs):
248 > ui.warn(b'warn during pretxnclose\n')
249 > ui.warn(b'warn during pretxnclose\n')
249 >
250 >
250 > def txnclose(ui, repo, **kwargs):
251 > def txnclose(ui, repo, **kwargs):
251 > ui.warn(b'warn during txnclose\n')
252 > ui.warn(b'warn during txnclose\n')
252 >
253 >
253 > def txnabort(ui, repo, **kwargs):
254 > def txnabort(ui, repo, **kwargs):
254 > ui.warn(b'warn during abort\n')
255 > ui.warn(b'warn during abort\n')
255 >
256 >
256 > class fdproxy(object):
257 > class fdproxy(object):
257 > def __init__(self, ui, o):
258 > def __init__(self, ui, o):
258 > self._ui = ui
259 > self._ui = ui
259 > self._o = o
260 > self._o = o
260 >
261 >
261 > def __getattr__(self, attr):
262 > def __getattr__(self, attr):
262 > return getattr(self._o, attr)
263 > return getattr(self._o, attr)
263 >
264 >
264 > def write(self, msg):
265 > def write(self, msg):
265 > errors = set(self._ui.configlist(b'ui', b'ioerrors'))
266 > errors = set(self._ui.configlist(b'ui', b'ioerrors'))
266 > pretxncommit = msg == b'warn during pretxncommit\n'
267 > pretxncommit = msg == b'warn during pretxncommit\n'
267 > pretxnclose = msg == b'warn during pretxnclose\n'
268 > pretxnclose = msg == b'warn during pretxnclose\n'
268 > txnclose = msg == b'warn during txnclose\n'
269 > txnclose = msg == b'warn during txnclose\n'
269 > txnabort = msg == b'warn during abort\n'
270 > txnabort = msg == b'warn during abort\n'
270 > msgabort = msg == _(b'transaction abort!\n')
271 > msgabort = msg == _(b'transaction abort!\n')
271 > msgrollback = msg == _(b'rollback completed\n')
272 > msgrollback = msg == _(b'rollback completed\n')
272 >
273 >
273 > if pretxncommit and b'pretxncommit' in errors:
274 > if pretxncommit and b'pretxncommit' in errors:
274 > raise IOError(errno.EPIPE, 'simulated epipe')
275 > raise IOError(errno.EPIPE, 'simulated epipe')
275 > if pretxnclose and b'pretxnclose' in errors:
276 > if pretxnclose and b'pretxnclose' in errors:
276 > raise IOError(errno.EIO, 'simulated eio')
277 > raise IOError(errno.EIO, 'simulated eio')
277 > if txnclose and b'txnclose' in errors:
278 > if txnclose and b'txnclose' in errors:
278 > raise IOError(errno.EBADF, 'simulated badf')
279 > raise IOError(errno.EBADF, 'simulated badf')
279 > if txnabort and b'txnabort' in errors:
280 > if txnabort and b'txnabort' in errors:
280 > raise IOError(errno.EPIPE, 'simulated epipe')
281 > raise IOError(errno.EPIPE, 'simulated epipe')
281 > if msgabort and b'msgabort' in errors:
282 > if msgabort and b'msgabort' in errors:
282 > raise IOError(errno.EBADF, 'simulated ebadf')
283 > raise IOError(errno.EBADF, 'simulated ebadf')
283 > if msgrollback and b'msgrollback' in errors:
284 > if msgrollback and b'msgrollback' in errors:
284 > raise IOError(errno.EIO, 'simulated eio')
285 > raise IOError(errno.EIO, 'simulated eio')
285 >
286 >
286 > return self._o.write(msg)
287 > return self._o.write(msg)
287 >
288 >
288 > def uisetup(ui):
289 > def uisetup(ui):
289 > class badui(ui.__class__):
290 > class badui(ui.__class__):
290 > def _write(self, dest, *args, **kwargs):
291 > def _write(self, dest, *args, **kwargs):
291 > olderr = self.ferr
292 > olderr = self.ferr
292 > try:
293 > try:
293 > if dest is self.ferr:
294 > if dest is self.ferr:
294 > self.ferr = dest = fdproxy(self, olderr)
295 > self.ferr = dest = fdproxy(self, olderr)
295 > return super(badui, self)._write(dest, *args, **kwargs)
296 > return super(badui, self)._write(dest, *args, **kwargs)
296 > finally:
297 > finally:
297 > self.ferr = olderr
298 > self.ferr = olderr
298 >
299 >
299 > ui.__class__ = badui
300 > ui.__class__ = badui
300 >
301 >
301 > def reposetup(ui, repo):
302 > def reposetup(ui, repo):
302 > ui.setconfig(b'hooks', b'pretxnclose.badui', pretxnclose, b'badui')
303 > ui.setconfig(b'hooks', b'pretxnclose.badui', pretxnclose, b'badui')
303 > ui.setconfig(b'hooks', b'txnclose.badui', txnclose, b'badui')
304 > ui.setconfig(b'hooks', b'txnclose.badui', txnclose, b'badui')
304 > ui.setconfig(b'hooks', b'pretxncommit.badui', pretxncommit, b'badui')
305 > ui.setconfig(b'hooks', b'pretxncommit.badui', pretxncommit, b'badui')
305 > ui.setconfig(b'hooks', b'txnabort.badui', txnabort, b'badui')
306 > ui.setconfig(b'hooks', b'txnabort.badui', txnabort, b'badui')
306 > EOF
307 > EOF
307
308
308 $ cat >> $HGRCPATH << EOF
309 $ cat >> $HGRCPATH << EOF
309 > [extensions]
310 > [extensions]
310 > badui = $TESTTMP/badui.py
311 > badui = $TESTTMP/badui.py
311 > EOF
312 > EOF
312
313
An I/O error during pretxncommit is handled

$ hg init ioerror-pretxncommit
$ cd ioerror-pretxncommit
$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose
$ echo 1 > foo
$ hg --config ui.ioerrors=pretxncommit commit -m 'error during pretxncommit'
warn during pretxnclose
warn during txnclose

$ hg commit -m 'commit 1'
nothing changed
[1]

$ cd ..

An I/O error during pretxnclose is handled

$ hg init ioerror-pretxnclose
$ cd ioerror-pretxnclose
$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ echo 1 > foo
$ hg --config ui.ioerrors=pretxnclose commit -m 'error during pretxnclose'
warn during pretxncommit
warn during txnclose

$ hg commit -m 'commit 1'
nothing changed
[1]

$ cd ..

An I/O error during txnclose is handled

$ hg init ioerror-txnclose
$ cd ioerror-txnclose
$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ echo 1 > foo
$ hg --config ui.ioerrors=txnclose commit -m 'error during txnclose'
warn during pretxncommit
warn during pretxnclose

$ hg commit -m 'commit 1'
nothing changed
[1]

$ cd ..

An I/O error writing "transaction abort" is handled

$ hg init ioerror-msgabort
$ cd ioerror-msgabort

$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ echo 1 > foo
$ hg --config ui.ioerrors=msgabort --config hooks.pretxncommit=false commit -m 'error during abort message'
warn during abort
rollback completed
abort: pretxncommit hook exited with status 1
[40]

$ hg commit -m 'commit 1'
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ cd ..

An I/O error during txnabort should still result in rollback

$ hg init ioerror-txnabort
$ cd ioerror-txnabort

$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ echo 1 > foo
$ hg --config ui.ioerrors=txnabort --config hooks.pretxncommit=false commit -m 'error during abort'
transaction abort!
rollback completed
abort: pretxncommit hook exited with status 1
[40]

$ hg commit -m 'commit 1'
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ cd ..
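
The guarantee exercised above, that rollback still completes even when output during the abort fails, boils down to a defensive pattern: treat status messages as best effort and never let a failed write skip a cleanup step. A minimal, generic sketch of that pattern follows (hypothetical helper name, not Mercurial's actual transaction code):

    def run_cleanup(steps, write):
        """Run every cleanup step; a failed status write must not skip any."""
        for message, step in steps:
            try:
                write(message)
            except IOError:
                pass  # the output channel may be gone; keep cleaning up
            step()
        try:
            write(b'rollback completed\n')
        except IOError:
            pass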

An I/O error writing "rollback completed" is handled

$ hg init ioerror-msgrollback
$ cd ioerror-msgrollback

$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ echo 1 > foo

$ hg --config ui.ioerrors=msgrollback --config hooks.pretxncommit=false commit -m 'error during rollback message'
transaction abort!
warn during abort
abort: pretxncommit hook exited with status 1
[40]

$ hg verify -q

$ cd ..

Multiple I/O errors after transaction open are handled.
This is effectively what happens if a peer disconnects in the middle
of a transaction.

$ hg init ioerror-multiple
$ cd ioerror-multiple
$ echo 0 > foo
$ hg -q commit -A -m initial
warn during pretxncommit
warn during pretxnclose
warn during txnclose

$ echo 1 > foo

$ hg --config ui.ioerrors=pretxncommit,pretxnclose,txnclose,txnabort,msgabort,msgrollback commit -m 'multiple errors'

$ hg verify -q

$ cd ..
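
Turning on all six injection points at once approximates a peer whose connection died in the middle of a transaction: every status write fails, yet the hg verify -q check above passes, so the repository is left consistent. A caller that wants to tell a vanished reader apart from other I/O problems can inspect the errno values the extension simulates (EPIPE, EIO, EBADF). A small generic sketch, noting that IOError is an alias of OSError on Python 3:

    import errno

    def safe_report(write, msg):
        """Write a status message, but tolerate a dead output channel."""
        try:
            write(msg)
            return True
        except OSError as exc:
            if exc.errno in (errno.EPIPE, errno.EIO, errno.EBADF):
                return False  # stop reporting, but keep doing the real work
            raise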