localrepo: only use 'bookmarksinstore' requirement if we have 'store'...
Pulkit Goyal
r45857:dc457177 default
@@ -1,3473 +1,3496 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    commit,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
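# For illustration (assumed example, not part of this change): a cache
# entry of (b'bookmarks', b'plain') refers to .hg/bookmarks, while
# (b'phaseroots', b'') refers to .hg/store/phaseroots.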


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
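
# Illustrative usage (hypothetical property name; assumed example, not
# part of this change):
#
#     obj, cached = isfilecached(repo, b'dirstate')
#     if cached:
#         pass  # reuse the already-loaded object without a disk read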


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper
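
# For illustration (assumed usage, matching how such decorators are
# applied elsewhere in this module):
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # the body always sees the unfiltered repository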


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
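
# Illustrative caller-side use of the executor protocol (assumed example,
# not part of this change):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()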


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, ancestorsof=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement allows storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses a persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
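
# Illustrative extension-side registration (assumed example, not part of
# this change):
#
#     def featuresetup(ui, supported):
#         supported |= {b'my-extension-requirement'}
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)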


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()
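
    # For illustration (assumed example, not from this change), a modern
    # .hg/requires might contain:
    #
    #     dotencode
    #     fncache
    #     generaldelta
    #     revlogv1
    #     sparserevlog
    #     store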

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})
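
    # For illustration (values assumed): the derived type name embeds the
    # repo path and its sorted requirements, e.g.
    # "derivedrepo:/path/to/repo<fncache,revlogv1,store>".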

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False
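
# Sketch of the monkeypatching mentioned above (hypothetical file name
# b'hgrc-extra'; assumed example, not part of this change):
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)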


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)
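
# For reference (restating the logic above): 'store' plus 'fncache'
# selects fncachestore (with 'dotencode' toggling dot-encoding), 'store'
# alone selects encodedstore, and neither selects the legacy basicstore
# layout.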


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]
928
928
929 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
929 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
930 if options[b'zlib.level'] is not None:
930 if options[b'zlib.level'] is not None:
931 if not (0 <= options[b'zlib.level'] <= 9):
931 if not (0 <= options[b'zlib.level'] <= 9):
932 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
932 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
933 raise error.Abort(msg % options[b'zlib.level'])
933 raise error.Abort(msg % options[b'zlib.level'])
934 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
934 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
935 if options[b'zstd.level'] is not None:
935 if options[b'zstd.level'] is not None:
936 if not (0 <= options[b'zstd.level'] <= 22):
936 if not (0 <= options[b'zstd.level'] <= 22):
937 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
937 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
938 raise error.Abort(msg % options[b'zstd.level'])
938 raise error.Abort(msg % options[b'zstd.level'])
939
939
940 if repository.NARROW_REQUIREMENT in requirements:
940 if repository.NARROW_REQUIREMENT in requirements:
941 options[b'enableellipsis'] = True
941 options[b'enableellipsis'] = True
942
942
943 if ui.configbool(b'experimental', b'rust.index'):
943 if ui.configbool(b'experimental', b'rust.index'):
944 options[b'rust.index'] = True
944 options[b'rust.index'] = True
945 if NODEMAP_REQUIREMENT in requirements:
945 if NODEMAP_REQUIREMENT in requirements:
946 options[b'persistent-nodemap'] = True
946 options[b'persistent-nodemap'] = True
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
948 options[b'persistent-nodemap.mmap'] = True
948 options[b'persistent-nodemap.mmap'] = True
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
950 options[b'persistent-nodemap.mode'] = epnm
950 options[b'persistent-nodemap.mode'] = epnm
951 if ui.configbool(b'devel', b'persistent-nodemap'):
951 if ui.configbool(b'devel', b'persistent-nodemap'):
952 options[b'devel-force-nodemap'] = True
952 options[b'devel-force-nodemap'] = True
953
953
954 return options
954 return options
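
# Illustrative sketch (assumed, not the output of any particular repository):
# for a repository with common modern requirements such as b'revlogv1',
# b'generaldelta' and the sparse-revlog requirement, the dict returned by
# resolverevlogstorevfsoptions() could plausibly contain entries like
#
#     {
#         b'flagprocessors': {},
#         b'revlogv1': True,
#         b'generaldelta': True,
#         b'sparse-revlog': True,
#         b'maxchainlen': revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH,
#         b'zlib.level': None,
#         ...
#     }
#
# The exact keys and values depend on the repository's requirements and on
# the ui configuration read above.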


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
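
# Hedged sketch (not the actual implementation) of how a function such as
# ``makelocalrepository()`` is assumed to consume the list above; the helper
# name ``_derivetype`` and its exact signature are hypothetical:
#
#     def _derivetype(requirements, features):
#         bases = []
#         for iface, unwrap in REPO_INTERFACES:
#             factory = unwrap()  # resolve the lambda late so wrappers apply
#             bases.append(
#                 factory(requirements=requirements, features=features)
#             )
#         # combine the per-interface types into one concrete repo class
#         return type('derivedrepo', tuple(bases), {})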


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path, stacklevel=4
                    )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

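    # Illustrative sketch (paths and subrepo layout assumed): for a repo
    # rooted at b'/repo' whose working copy declares a subrepository b'sub',
    # one would expect roughly:
    #
    #     repo._checknested(b'/repo/sub')    -> True
    #     repo._checknested(b'/repo/other')  -> False (not a subrepo entry)
    #     repo._checknested(b'/tmp/x')       -> False (outside the root)
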
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

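    # Hedged usage sketch; b'visible' and b'served' are standard repoview
    # filter names, but the caller code below is illustrative only:
    #
    #     visible = repo.filtered(b'visible')  # hide obsolete/hidden csets
    #     served = repo.filtered(b'served')    # the view exposed to clients
    #
    # Both views wrap the same unfiltered repository and differ only in
    # which revisions they reveal.
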
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are
        # dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would
        # be refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still
        # happen without the lock (with a narrower window), but it would no
        # longer go undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` is captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as
        # the cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

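    # Hedged illustration (all concrete values assumed): for a filter that
    # can see the working copy, the mapping above may resemble
    #
    #     {
    #         b'null': (-1, nullid),
    #         -1: (-1, nullid),
    #         nullid: (-1, nullid),
    #         42: (42, p1node),
    #         p1node: (42, p1node),
    #         b'.': (42, p1node),
    #     }
    #
    # where 42 stands in for the revision of the first working-copy parent
    # and p1node for its binary node id.
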
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

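    # Sketch of the lookup forms handled by __getitem__ above; the concrete
    # values below are illustrative assumptions:
    #
    #     repo[None]       # workingctx for the working directory
    #     repo[b'.']       # first parent of the working directory
    #     repo[b'tip']     # repository tip
    #     repo[0]          # changectx for revision 0
    #     repo[b'a' * 40]  # full 40-char hex nodeid (hypothetical hash)
    #     repo[0:3]        # list of changectx, filtered revisions skipped
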
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

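    # Hedged examples of the %-formatting described above (caller-side
    # sketch; the variable names are assumptions):
    #
    #     merges = repo.revs(b'%ld and merge()', somerevs)     # %ld: int list
    #     heads = repo.revs(b'heads(branch(%s))', branchname)  # %s: bytes
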
1719 def set(self, expr, *args):
1719 def set(self, expr, *args):
1720 '''Find revisions matching a revset and emit changectx instances.
1720 '''Find revisions matching a revset and emit changectx instances.
1721
1721
1722 This is a convenience wrapper around ``revs()`` that iterates the
1722 This is a convenience wrapper around ``revs()`` that iterates the
1723 result and is a generator of changectx instances.
1723 result and is a generator of changectx instances.
1724
1724
1725 Revset aliases from the configuration are not expanded. To expand
1725 Revset aliases from the configuration are not expanded. To expand
1726 user aliases, consider calling ``scmutil.revrange()``.
1726 user aliases, consider calling ``scmutil.revrange()``.
1727 '''
1727 '''
1728 for r in self.revs(expr, *args):
1728 for r in self.revs(expr, *args):
1729 yield self[r]
1729 yield self[r]
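
    # The same query surface as ``revs()`` but yielding changectx objects
    # (illustrative, assuming an existing ``repo``):
    #
    #     for ctx in repo.set(b'branch(%s) and head()', b'default'):
    #         print(ctx.hex(), ctx.description())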

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
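
    # A hedged sketch of resolving user-supplied revsets with a local alias
    # override (the alias name and definition are assumptions; assumes an
    # existing ``repo``):
    #
    #     revs = repo.anyrevs(
    #         [b'releases()'],
    #         user=True,
    #         localalias={b'releases': b'tag() and public()'},
    #     )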

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
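
    # Illustrative call (assuming an existing ``repo``); for shell hooks the
    # keyword arguments surface as HG_* environment variables:
    #
    #     repo.hook(b'preupdate', throw=True, parent1=b'abc123', parent2=b'')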

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
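
    # Illustrative usage (assuming an existing ``repo``):
    #
    #     kind = repo.tagtype(b'v1.0')  # b'local', b'global', or None
    #     if kind is None:
    #         print('no such tag')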

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
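
    # Illustrative lookup (assuming an existing ``repo``); with
    # ``ignoremissing=True`` a missing branch yields None instead of raising:
    #
    #     node = repo.branchtip(b'stable', ignoremissing=True)
    #     if node is None:
    #         print('no such branch')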

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
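
    # A hedged sketch of registering a data filter and wiring it to an
    # ``[encode]`` pattern (the ``upper:`` filter name and hgrc snippet are
    # assumptions for illustration; assumes an existing ``repo``):
    #
    #     def upperfilter(data, params, **kwargs):
    #         return data.upper()
    #
    #     repo.adddatafilter(b'upper:', upperfilter)
    #
    #     # hgrc:
    #     #   [encode]
    #     #   **.txt = upper: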

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
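        #
        # A hedged example of a Python ``txnclose`` hook consuming that file
        # (the hook wiring and argument names are assumptions, not part of
        # this module):
        #
        #     def tagmovehook(ui, repo, **kwargs):
        #         if kwargs.get('tag_moved'):
        #             data = repo.vfs.tryread(b'changes/tags.changes')
        #             for line in data.splitlines():
        #                 action, node, name = line.split(b' ', 2)
        #                 ui.write(b'tag %s: %s\n' % (action, name))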
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # once the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # This should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted."""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
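
    # A minimal usage sketch (assuming an existing ``repo``); the store lock
    # must already be held, or the devel lock checks above will complain:
    #
    #     with repo.lock():
    #         tr = repo.transaction(b'my-operation')
    #         try:
    #             ...  # write to the store
    #             tr.close()
    #         finally:
    #             tr.release()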

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
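
    # Illustrative call (assuming an existing ``repo`` and that the caller
    # holds the store lock): warm every cache, including the lazily-loaded
    # ones, e.g. after a large conversion:
    #
    #     with repo.lock():
    #         repo.updatecaches(full=True)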

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()
2566
2566
2567 def invalidatedirstate(self):
2567 def invalidatedirstate(self):
2568 '''Invalidates the dirstate, causing the next call to dirstate
2568 '''Invalidates the dirstate, causing the next call to dirstate
2569 to check if it was modified since the last time it was read,
2569 to check if it was modified since the last time it was read,
2570 rereading it if it has.
2570 rereading it if it has.
2571
2571
        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l
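
    # A minimal usage sketch (illustrative only, not a method of this class):
    # the ordering contract documented above means callers needing both locks
    # nest them wlock-first, exactly as commit() below does:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # mutate the working copy and the store together
    #
    # Taking them in the opposite order risks deadlock against a process that
    # follows the documented order (and trips the devel warning above).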

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            if cctx.isempty() and not self.ui.configbool(
                b'ui', b'allowemptycommit'
            ):
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret
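
    # Illustrative sketch (assumed caller, hypothetical values): a typical
    # invocation is
    #
    #     node = repo.commit(text=b'fix bug', user=b'alice <a@example.com>')
    #     if node is None:
    #         pass  # nothing to commit; merge state was cleared
    #
    # mirroring the ui.allowemptycommit handling above.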

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        return commit.commitctx(self, ctx, error=error, origctx=origctx)

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
3096 """
3096 """
3097
3097
3098 @unfilteredpropertycache
3098 @unfilteredpropertycache
3099 def prepushoutgoinghooks(self):
3099 def prepushoutgoinghooks(self):
3100 """Return util.hooks consists of a pushop with repo, remote, outgoing
3100 """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret
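
    # Illustrative sketch (hypothetical values): bookmark pushes travel this
    # path; `old` is the previous node, empty for a new bookmark:
    #
    #     ok = repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
    #
    # A False return means a prepushkey hook aborted the update.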

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a
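
# Illustrative sketch (assumed call shape, not a call made in this module):
# a transaction can register the closure returned by aftertrans() so that,
# once the transaction completes, journal files are renamed to their undo
# counterparts:
#
#     post = aftertrans([(repo.svfs, b'journal', b'undo')])
#     post()  # renames .hg/store/journal to .hg/store/undo if present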


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))
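
# For example, undoname(b'.hg/store/journal') returns b'.hg/store/undo';
# the assertion guards against callers passing a non-journal path.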


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts
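
# A quick behavior sketch (assuming stock defaults): with no explicit
# options and storage.new-repo-backend left at its default, the result is
# simply {b'backend': b'revlogv1'}.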


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements
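
# Illustrative sketch (assuming stock format.* defaults): a plain `hg init`
# style setup yields roughly
#
#     {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#      SPARSEREVLOG_REQUIREMENT}
#
# since usestore, usefncache, dotencode and sparse-revlog default to on.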


def checkrequirementscompat(ui, requirements):
    """Checks compatibility of repository requirements enabled and disabled.

    Returns a set of requirements which need to be dropped because dependent
    requirements are not enabled. Also warns users about it."""

    dropped = set()

    if b'store' not in requirements:
        if bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT in requirements:
            ui.warn(
                _(
                    b'ignoring enabled \'format.bookmarks-in-store\' config '
                    b'because it is incompatible with disabled '
                    b'\'format.usestore\' config\n'
                )
            )
            dropped.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return dropped
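
# Illustrative sketch (hypothetical values): with format.usestore disabled,
#
#     reqs = {b'revlogv1', bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT}
#     dropped = checkrequirementscompat(ui, reqs)
#     # dropped == {bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT}, with a warning
#
# createrepository() below subtracts this set before writing requirements.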


def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}
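
# Illustrative sketch (hypothetical option name): unrecognized keys are
# returned to the caller, which then refuses to create the repository:
#
#     leftover = filterknowncreateopts(
#         ui, {b'backend': b'revlogv1', b'frobnicate': True}
#     )
#     # leftover == {b'frobnicate': True}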


def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)
    requirements -= checkrequirementscompat(ui, requirements)
3392
3415
3393 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3416 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3394
3417
3395 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3418 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3396 if hgvfs.exists():
3419 if hgvfs.exists():
3397 raise error.RepoError(_(b'repository %s already exists') % path)
3420 raise error.RepoError(_(b'repository %s already exists') % path)
3398
3421
3399 if b'sharedrepo' in createopts:
3422 if b'sharedrepo' in createopts:
3400 sharedpath = createopts[b'sharedrepo'].sharedpath
3423 sharedpath = createopts[b'sharedrepo'].sharedpath
3401
3424
3402 if createopts.get(b'sharedrelative'):
3425 if createopts.get(b'sharedrelative'):
3403 try:
3426 try:
3404 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3427 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3405 except (IOError, ValueError) as e:
3428 except (IOError, ValueError) as e:
3406 # ValueError is raised on Windows if the drive letters differ
3429 # ValueError is raised on Windows if the drive letters differ
3407 # on each path.
3430 # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

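    # Illustration: the four header bytes b'\0\0\0\2' written above decode
    # as revlog version 2 (struct.unpack(b'>I', b'\0\0\0\2')[0] == 2);
    # historically only revlog versions 0 and 1 existed before the
    # requirements file, so such clients abort instead of misreading
    # the repository.
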
    scmutil.writerequires(hgvfs, requirements)

    # Write out a file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
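
As an aside, the class-swapping trick above can be shown in isolation. A
minimal sketch in plain Python (no Mercurial APIs; all names here are
invented for illustration):

    class _Poisoned(object):
        def __getattribute__(self, name):
            # Allow close() so late cleanup code can still run.
            if name == 'close':
                return object.__getattribute__(self, name)
            raise RuntimeError('object used after being poisoned')

        def close(self):
            pass

    class Resource(object):
        def close(self):
            print('closing')

    r = Resource()
    object.__setattr__(r, '__class__', _Poisoned)
    r.close()  # still allowed, now a no-op
    try:
        r.anything  # any other attribute access fails loudly
    except RuntimeError as e:
        print(e)

Going through object.__setattr__() instead of plain attribute assignment
matters because, as the comment above notes, a repoview intercepts
__setattr__.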
@@ -1,281 +1,286 @@
#testcases vfs svfs

  $ echo "[extensions]" >> $HGRCPATH
  $ echo "share = " >> $HGRCPATH

#if svfs
  $ echo "[format]" >> $HGRCPATH
  $ echo "bookmarks-in-store = yes " >> $HGRCPATH
#endif

prepare repo1

  $ hg init repo1
  $ cd repo1
  $ echo a > a
  $ hg commit -A -m'init'
  adding a
  $ echo a >> a
  $ hg commit -m'change in shared clone'
  $ echo b > b
  $ hg commit -A -m'another file'
  adding b

share it

  $ cd ..
  $ hg share repo1 repo2
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

test sharing bookmarks

  $ hg share -B repo1 repo3
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd repo1
  $ hg bookmark bm1
  $ hg bookmarks
   * bm1                       2:c2e0ac586386
  $ cd ../repo2
  $ hg book bm2
  $ hg bookmarks
     bm1                       2:c2e0ac586386 (svfs !)
   * bm2                       2:c2e0ac586386
  $ cd ../repo3
  $ hg bookmarks
     bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
  $ hg book bm3
  $ hg bookmarks
     bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
   * bm3                       2:c2e0ac586386
  $ cd ../repo1
  $ hg bookmarks
   * bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
     bm3                       2:c2e0ac586386

check whether HG_PENDING makes pending changes only in related
repositories visible to an external hook.

In the "hg share" case, another transaction can't run in other
repositories sharing the same source repository, because starting a
transaction requires locking the store of the source repository.

Therefore, this test scenario skips checking the visibility of
.hg/bookmarks.pending in repo2, which shares repo1 without bookmarks.
(A sketch of the pending-file selection logic follows the helper
script below.)

  $ cat > $TESTTMP/checkbookmarks.sh <<EOF
  > echo "@repo1"
  > hg -R "$TESTTMP/repo1" bookmarks
  > echo "@repo2"
  > hg -R "$TESTTMP/repo2" bookmarks
  > echo "@repo3"
  > hg -R "$TESTTMP/repo3" bookmarks
  > exit 1 # to avoid adding new bookmark for subsequent tests
  > EOF

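A hedged aside, not part of the original test: the selection rule being
exercised can be sketched in isolation. "readbookmarksfile" below is a
hypothetical helper (Mercurial has its own machinery for this); the point
is that ".hg/bookmarks.pending" is preferred over ".hg/bookmarks" only
while HG_PENDING names this repository's root:

  $ cat > $TESTTMP/pendingselect.py <<EOF
  > # Sketch: read pending bookmarks only when HG_PENDING points at this
  > # repository's root and a pending file actually exists.
  > import os
  > def readbookmarksfile(repo_root):
  >     name = 'bookmarks'
  >     if os.environ.get('HG_PENDING') == repo_root and os.path.exists(
  >         os.path.join(repo_root, '.hg', 'bookmarks.pending')
  >     ):
  >         name = 'bookmarks.pending'
  >     path = os.path.join(repo_root, '.hg', name)
  >     if not os.path.exists(path):
  >         return b''
  >     with open(path, 'rb') as f:
  >         return f.read()
  > EOF
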
  $ cd ../repo1
  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
  @repo1
     bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
     bm3                       2:c2e0ac586386
   * bmX                       2:c2e0ac586386
  @repo2
     bm1                       2:c2e0ac586386 (svfs !)
   * bm2                       2:c2e0ac586386
     bm3                       2:c2e0ac586386 (svfs !)
  @repo3
     bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
   * bm3                       2:c2e0ac586386
     bmX                       2:c2e0ac586386 (vfs !)
  transaction abort!
  rollback completed
  abort: pretxnclose hook exited with status 1
  [255]
  $ hg book bm1

FYI, in contrast to the above test, bmX is invisible in repo1 (= the
shared src), because (1) HG_PENDING refers only to repo3 and (2)
"bookmarks.pending" is written only into repo3.

  $ cd ../repo3
  $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
  @repo1
   * bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
     bm3                       2:c2e0ac586386
  @repo2
     bm1                       2:c2e0ac586386 (svfs !)
   * bm2                       2:c2e0ac586386
     bm3                       2:c2e0ac586386 (svfs !)
  @repo3
     bm1                       2:c2e0ac586386
     bm2                       2:c2e0ac586386 (svfs !)
     bm3                       2:c2e0ac586386
   * bmX                       2:c2e0ac586386
  transaction abort!
  rollback completed
  abort: pretxnclose hook exited with status 1
  [255]
  $ hg book bm3

clean up bm2 since it's uninteresting (not shared in the vfs case and
same as bm3 in the svfs case)
  $ cd ../repo2
  $ hg book -d bm2

  $ cd ../repo1

test that commits work

  $ echo 'shared bookmarks' > a
  $ hg commit -m 'testing shared bookmarks'
  $ hg bookmarks
   * bm1                       3:b87954705719
     bm3                       2:c2e0ac586386
  $ cd ../repo3
  $ hg bookmarks
     bm1                       3:b87954705719
   * bm3                       2:c2e0ac586386
  $ echo 'more shared bookmarks' > a
  $ hg commit -m 'testing shared bookmarks'
  created new head
  $ hg bookmarks
     bm1                       3:b87954705719
   * bm3                       4:62f4ded848e4
  $ cd ../repo1
  $ hg bookmarks
   * bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
  $ cd ..

test pushing bookmarks works

  $ hg clone repo3 repo4
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd repo4
  $ hg boo bm4
  $ echo foo > b
  $ hg commit -m 'foo in b'
  $ hg boo
     bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
   * bm4                       5:92793bfc8cad
  $ hg push -B bm4
  pushing to $TESTTMP/repo3
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  exporting bookmark bm4
  $ cd ../repo1
  $ hg bookmarks
   * bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
     bm4                       5:92793bfc8cad
  $ cd ../repo3
  $ hg bookmarks
     bm1                       3:b87954705719
   * bm3                       4:62f4ded848e4
     bm4                       5:92793bfc8cad
  $ cd ..

test behavior when sharing a shared repo

  $ hg share -B repo3 missingdir/repo5
  updating working directory
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd missingdir/repo5
  $ hg book
     bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
     bm4                       5:92793bfc8cad
  $ cd ../..

test what happens when an active bookmark is deleted

  $ cd repo1
  $ hg boo -d bm3
  $ hg boo
   * bm1                       3:b87954705719
     bm4                       5:92793bfc8cad
  $ cd ../repo3
  $ hg boo
     bm1                       3:b87954705719
     bm4                       5:92793bfc8cad
  $ cd ..

verify that bookmarks are not written on failed transaction

  $ cat > failpullbookmarks.py << EOF
  > """A small extension that makes bookmark pulls fail, for testing"""
  > from __future__ import absolute_import
  > from mercurial import (
  >     error,
  >     exchange,
  >     extensions,
  > )
  > def _pullbookmarks(orig, pullop):
  >     orig(pullop)
  >     raise error.HookAbort('forced failure by extension')
  > def extsetup(ui):
  >     extensions.wrapfunction(exchange, '_pullbookmarks', _pullbookmarks)
  > EOF
  $ cd repo4
  $ hg boo
     bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
   * bm4                       5:92793bfc8cad
  $ cd ../repo3
  $ hg boo
     bm1                       3:b87954705719
     bm4                       5:92793bfc8cad
  $ hg --config "extensions.failpullbookmarks=$TESTTMP/failpullbookmarks.py" pull $TESTTMP/repo4
  pulling from $TESTTMP/repo4
  searching for changes
  no changes found
  adding remote bookmark bm3
  abort: forced failure by extension
  [255]
  $ hg boo
     bm1                       3:b87954705719
     bm4                       5:92793bfc8cad
  $ hg pull $TESTTMP/repo4
  pulling from $TESTTMP/repo4
  searching for changes
  no changes found
  adding remote bookmark bm3
  1 local changesets published
  $ hg boo
     bm1                       3:b87954705719
   * bm3                       4:62f4ded848e4
     bm4                       5:92793bfc8cad
  $ cd ..

verify bookmark behavior after unshare

  $ cd repo3
  $ hg unshare
  $ hg boo
     bm1                       3:b87954705719
   * bm3                       4:62f4ded848e4
     bm4                       5:92793bfc8cad
  $ hg boo -d bm4
  $ hg boo bm5
  $ hg boo
     bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
   * bm5                       4:62f4ded848e4
  $ cd ../repo1
  $ hg boo
   * bm1                       3:b87954705719
     bm3                       4:62f4ded848e4
     bm4                       5:92793bfc8cad
  $ cd ..
+
+Test that if store is disabled, we drop the bookmarksinstore requirement
+
+  $ hg init brokenrepo --config format.bookmarks-in-store=True --config format.usestore=false
+  ignoring enabled 'format.bookmarks-in-store' config because it is incompatible with disabled 'format.usestore' config
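
For context, a sketch of what the checkrequirementscompat() helper called
from createrepository() above may look like, reconstructed from its call
site and from the warning this new test expects (a hedged sketch, not the
verbatim implementation; the real helper may, for example, use a named
constant for the requirement string; `_` is the gettext wrapper imported
at the top of localrepo.py):

    def checkrequirementscompat(ui, requirements):
        """Check requirements derived from config options against each
        other and return the set of requirements that must be dropped."""
        dropped = set()

        if b'store' not in requirements:
            if b'bookmarksinstore' in requirements:
                ui.warn(
                    _(
                        b'ignoring enabled \'format.bookmarks-in-store\' '
                        b'config because it is incompatible with disabled '
                        b'\'format.usestore\' config\n'
                    )
                )
                dropped.add(b'bookmarksinstore')

        return dropped

The call site then simply subtracts the result:
requirements -= checkrequirementscompat(ui, requirements).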