commitctx: reorder some conditionals for efficiency in _filecommit...
marmoute
r45611:d4db0230 default draft
@@ -1,3811 +1,3811 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    metadata,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered"""

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


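# A minimal usage sketch for the caches above (illustrative only; the
# concrete declarations live on the localrepository class further down,
# e.g. its `_bookmarks` property):
#
#     class somerepo(object):
#         @repofilecache(b'bookmarks')
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)
#
# The decorated function runs once, its result is stored in the unfiltered
# repo's __dict__, and the entry is invalidated when the stat() of
# .hg/bookmarks changes. isfilecached() lets callers peek at that cache
# without triggering a potentially expensive recomputation.

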
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

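# Usage sketch (illustrative): operations that must see the full changelog
# are declared as
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...
#
# so they always run against the unfiltered repo, whatever repoview the
# caller holds.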

moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


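# A hedged usage sketch of the executor protocol (`peer` and the command
# arguments are assumptions for illustration):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         e.sendcommands()
#         node = f.result()
#
# For this local executor, callcommand() resolves the future eagerly, so
# sendcommands() is effectively a no-op; wire peers may batch requests
# instead.

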
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses a persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


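# A hedged sketch of how an extension typically registers such a hook (the
# requirement string is hypothetical):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myextension-feature')
#
#     def extsetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only hooks defined in extensions loaded for this ui are invoked; see
# gathersupportedrequirements() below.

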
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        except ValueError as e:
            # Can be raised on Python 3.8 when path is invalid.
            raise error.Abort(
                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
            )

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

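# A hedged sketch of the monkeypatching suggested above (`myext-hgrc` is a
# hypothetical extra config file):
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'myext-hgrc'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)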

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

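# A hedged usage sketch; the requirement set loosely mirrors what a modern
# `hg init` writes, but is illustrative here:
#
#     reqs = {b'store', b'fncache', b'dotencode', b'revlogv1'}
#     store = makestore(
#         reqs, b'/repo/.hg', lambda base: vfsmod.vfs(base)
#     )
#     # -> storemod.fncachestore with dotencode path encoding
#
# Without b'fncache' this yields storemod.encodedstore; with no b'store'
# requirement at all (very old repos), storemod.basicstore.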

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # we allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlog seems to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one"
        prefix = r.startswith
        if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
            options[b'compengine'] = r.split(b'-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True
941 options[b'enableellipsis'] = True
942
942
943 if ui.configbool(b'experimental', b'rust.index'):
943 if ui.configbool(b'experimental', b'rust.index'):
944 options[b'rust.index'] = True
944 options[b'rust.index'] = True
945 if NODEMAP_REQUIREMENT in requirements:
945 if NODEMAP_REQUIREMENT in requirements:
946 options[b'persistent-nodemap'] = True
946 options[b'persistent-nodemap'] = True
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
948 options[b'persistent-nodemap.mmap'] = True
948 options[b'persistent-nodemap.mmap'] = True
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
950 options[b'persistent-nodemap.mode'] = epnm
950 options[b'persistent-nodemap.mode'] = epnm
951 if ui.configbool(b'devel', b'persistent-nodemap'):
951 if ui.configbool(b'devel', b'persistent-nodemap'):
952 options[b'devel-force-nodemap'] = True
952 options[b'devel-force-nodemap'] = True
953
953
954 return options
954 return options
955
955
956
956
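# Illustrative sketch, not part of upstream Mercurial: roughly how the
# resolver above is consumed. ``makelocalrepository()`` merges these options
# into the store vfs so every revlog opened through it sees them; the names
# below (``svfs``, ``requirements``) are stand-ins for the real plumbing.
#
#   opts = resolverevlogstorevfsoptions(ui, requirements, features=set())
#   if SPARSEREVLOG_REQUIREMENT in requirements:
#       assert opts[b'generaldelta']  # sparse-revlog implies generaldelta
#   svfs.options = opts  # later picked up by revlogs opened through svfs

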
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]


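# Illustrative sketch, assuming the call pattern used by
# ``makelocalrepository()``: each factory above contributes one base class,
# and the final repository type is assembled dynamically. Names such as
# ``requirements`` and ``features`` stand in for the real arguments.
#
#   bases = []
#   for iface, fn in REPO_INTERFACES:
#       bases.append(fn()(requirements=requirements, features=features))
#   cls = type('derivedrepo', tuple(bases), {})  # hypothetical class name
#   repo = cls(baseui, ui, origroot, wdirvfs, hgvfs, ...)

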
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            # path prefixes covered by 'lock'
            vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
            if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

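    # Illustrative sketch, not upstream code: the wards above follow a simple
    # decorator pattern -- wrap ``vfs.audit``, run the original check, then
    # emit a devel warning for writes that happen without the expected lock.
    # A minimal standalone analogue, with hypothetical names:
    #
    #   def make_ward(origfunc, is_locked, warn):
    #       def ward(path, mode=None):
    #           ret = origfunc(path, mode=mode)
    #           if mode not in (None, 'r', 'rb') and not is_locked():
    #               warn('write with no lock: "%s"' % path)
    #           return ret
    #       return ward
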
    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

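    # Illustrative walk-through of the loop above (an assumption, not a
    # doctest): for ``subpath = b'sub/deep/x.txt'`` the candidate prefixes are
    # tried longest-first -- b'sub/deep/x.txt', then b'sub/deep', then b'sub'.
    # The first prefix found in ``ctx.substate`` either matches the normalized
    # subpath exactly (a legal nested repo) or delegates the remainder of the
    # path to that subrepo's own ``checknested()``.
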
    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

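    # Illustrative usage sketch (assumed, not a doctest): because filtering is
    # non-recursive and always starts from the unfiltered repo, the two calls
    # below yield equivalent views even though they start from different
    # filter levels.
    #
    #   served = repo.filtered(b'served')
    #   also_served = repo.filtered(b'visible').filtered(b'served')
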
    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

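    # Illustrative sketch of the ``includeexact`` branch above (assumed, not
    # a doctest): for a narrowspec limited to b'inside/' and a user match
    # naming b'outside/f' explicitly, the union keeps the exact path visible
    # so the caller can warn about it instead of silently dropping it.
    #
    #   em = matchmod.exact([b'outside/f'])
    #   nm = matchmod.unionmatcher([narrow, em])
    #   m = matchmod.intersectmatchers(usermatch, nm)
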
    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                #   skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

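    # Illustrative fast-path sketch (assumed, not a doctest): lookups such as
    # ``repo[b'null']``, ``repo[nullid]`` or -- on filters where the working
    # copy is visible -- ``repo[b'.']`` resolve straight from this mapping,
    # skipping revset parsing and changelog lookups entirely.
    #
    #   rev, node = repo._quick_access_changeid[b'null']  # (nullrev, nullid)
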
    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)

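    # Illustrative summary of the changeid forms accepted above (assumed, not
    # a doctest):
    #
    #   repo[None]        # workingctx
    #   repo[0]           # integer revision number
    #   repo[b'.']        # working directory parent
    #   repo[node]        # 20-byte binary node
    #   repo[b'a1b2...']  # 40-character hex node
    #   repo[0:3]         # slice -> list of changectx, filtered revs skipped
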
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

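    # Illustrative usage (assumed, not a doctest): the %-formatting escapes
    # values safely into the revset language, e.g. %d for an int and %ln for
    # a list of binary nodes (see ``revsetlang.formatspec``).
    #
    #   for rev in repo.revs(b'ancestors(%d) and not public()', tiprev):
    #       ...
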
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

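    # Illustrative sketch (not part of the original source)::
    #
    #   for ctx in repo.set(b'modifies(%s)', b'mercurial/localrepo.py'):
    #       process(ctx.hex(), ctx.description())   # hypothetical callback
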
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

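    # Illustrative sketch (not part of the original source): an extension
    # firing its own hook through this helper might do::
    #
    #   repo.hook(b'myext-didsomething', throw=False, node=hex(somenode))
    #
    # where ``myext-didsomething`` and ``somenode`` are hypothetical.
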
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

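    # Illustrative sketch (not part of the original source): tags() and
    # nodetags() are inverse views over the same cache, e.g.::
    #
    #   node = repo.tags()[b'tip']
    #   assert b'tip' in repo.nodetags(node)
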
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

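    # Illustrative sketch (not part of the original source): known() is the
    # wire-protocol style membership check, mapping each binary nodeid to a
    # bool, e.g.::
    #
    #   flags = repo.known([node1, node2])   # e.g. [True, False]
    #
    # where node1/node2 are hypothetical binary nodeids; filtered (hidden)
    # revisions are reported as unknown.
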
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

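    # Illustrative sketch (not part of the original source): an extension
    # could register a named data filter and reference it from the [encode]
    # or [decode] config sections, e.g.::
    #
    #   def upper(s, cmd, **kwargs):
    #       return s.upper()
    #
    #   repo.adddatafilter(b'upper:', upper)
    #
    # with ``**.txt = upper:`` under ``[encode]`` in hgrc; the filter name
    # and behavior here are hypothetical. Config values matching no
    # registered filter name fall back to shell command filtering via
    # procutil.filter (see _loadfilter above).
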
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

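    # Illustrative sketch (not part of the original source): ``flags`` is the
    # usual manifest flag string, so callers do things like::
    #
    #   repo.wwrite(b'a.txt', data, b'')      # regular file
    #   repo.wwrite(b'bin/run', data, b'x')   # executable file
    #   repo.wwrite(b'link', target, b'l')    # symlink pointing at ``target``
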
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

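    # Illustrative sketch (not part of the original source): callers open a
    # transaction while holding the store lock and typically rely on the
    # returned transaction's context-manager behavior to close on success or
    # abort on an exception::
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # write revlog data, bookmarks, phases, ...
    #
    # where ``my-operation`` is a hypothetical transaction description.
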
    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these caches
            # to be warmed up even if they haven't explicitly been requested
            # yet (if they've never been used by hg, they won't ever have been
            # written, even if they're a subset of another kind of cache that
            # *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)

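    # Illustrative sketch (not part of the original source): a full cache
    # warm-up, as done by ``hg debugupdatecaches``, boils down to something
    # like::
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
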
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)
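
        # Illustrative sketch (editor's addition; names are hypothetical):
        # callbacks receive one boolean indicating whether the locked
        # operation succeeded, as with commithook(unused_success) in
        # commit() below:
        #
        #     def on_unlock(success):
        #         if success:
        #             repo.ui.debug(b'repository fully unlocked\n')
        #
        #     repo._afterlock(on_unlock)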

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l
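
        # Usage sketch (editor's addition): callers that touch both the
        # working copy and the store take 'wlock' before 'lock', as
        # commit() below does:
        #
        #     with repo.wlock(), repo.lock():
        #         ...  # safe to modify .hg and .hg/store here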

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction

        input:

        fctx: a file context with the content we are trying to commit
        manifest1: manifest of changeset first parent
        manifest2: manifest of changeset second parent
        linkrev: revision number of the changeset being created
        tr: current transaction
        changelist: list of files being changed (modified in place)
        includecopymeta: boolean, set to False to skip storing the copy data
                    (only used by the Google specific feature of using
                    changeset extra as copy source of truth).

        output:

        The resulting filenode
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergestatemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
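        # Ordering note (editor's addition, reflecting this revision's stated
        # goal of reordering the conditional for efficiency): the constant-time
        # tests (`fparent2 != nullid` and the `meta` dict truth check) run
        # first, while flog.cmp(), which may need to read and compare revision
        # data, is evaluated only when the cheaper tests cannot already decide.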
        if fparent2 != nullid or meta or flog.cmp(fparent1, text):
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        set of files actually committed, since any file nodes derived
        directly from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
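        # Editor's note (hedged): outside changeset-sidedata mode, the branch
        # above is selected by the experimental config consulted above; the
        # values this method tests could be set as, e.g.:
        #
        #     [experimental]
        #     copies.write-to = compatibility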
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:
                    rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
                    removed = [f for f in removed if not rf(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug(
                        b'not reusing manifest (no file change in '
                        b'changelog, but manifest differs)\n'
                    )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
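
        # Illustrative sketch (editor's addition; names are hypothetical):
        # an extension might register a fixup hook like this:
        #
        #     def fixup(wctx, status):
        #         wctx.repo().ui.debug(
        #             b'%d modified\n' % len(status.modified)
        #         )
        #
        #     repo.addpostdsstatus(fixup)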

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
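        # Editor's gloss (derived from the loop below): for each
        # (top, bottom) pair, walk first parents from top toward bottom,
        # recording the nodes at distances 1, 2, 4, 8, ... from top
        # (f doubles after each sample), i.e. an exponentially spaced
        # sample of the first-parent chain.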
3412 r = []
3412 r = []
3413
3413
3414 for top, bottom in pairs:
3414 for top, bottom in pairs:
3415 n, l, i = top, [], 0
3415 n, l, i = top, [], 0
3416 f = 1
3416 f = 1
3417
3417
3418 while n != bottom and n != nullid:
3418 while n != bottom and n != nullid:
3419 p = self.changelog.parents(n)[0]
3419 p = self.changelog.parents(n)[0]
3420 if i == f:
3420 if i == f:
3421 l.append(n)
3421 l.append(n)
3422 f = f * 2
3422 f = f * 2
3423 n = p
3423 n = p
3424 i += 1
3424 i += 1
3425
3425
3426 r.append(l)
3426 r.append(l)
3427
3427
3428 return r
3428 return r
3429
3429
3430 def checkpush(self, pushop):
3430 def checkpush(self, pushop):
3431 """Extensions can override this function if additional checks have
3431 """Extensions can override this function if additional checks have
3432 to be performed before pushing, or call it if they override push
3432 to be performed before pushing, or call it if they override push
3433 command.
3433 command.
3434 """
3434 """
3435
3435
3436 @unfilteredpropertycache
3436 @unfilteredpropertycache
3437 def prepushoutgoinghooks(self):
3437 def prepushoutgoinghooks(self):
3438 """Return util.hooks consists of a pushop with repo, remote, outgoing
3438 """Return util.hooks consists of a pushop with repo, remote, outgoing
3439 methods, which are called before pushing changesets.
3439 methods, which are called before pushing changesets.
3440 """
3440 """
3441 return util.hooks()
3441 return util.hooks()
3442
3442
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

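    # Illustrative call (hedged; key/value encoding is namespace-specific and
    # `newnode` is hypothetical): creating a bookmark over pushkey transmits
    # hex nodes and an empty old value:
    #
    #   repo.pushkey(b'bookmarks', b'feature', b'', hex(newnode))
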
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest first to make
            # sure the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


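# Example (hedged, derived from the code above): a transaction journal file
# maps to its post-transaction undo counterpart:
#
#   undoname(b'/repo/.hg/journal.dirstate')  # -> b'/repo/.hg/undo.dirstate'

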
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


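# Hedged example (assumes stock configuration): with no explicit options this
# typically fills in the revlog backend, i.e.
#
#   defaultcreateopts(ui)  # -> {b'backend': b'revlogv1'}

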
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements


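# Hedged note (assumes stock configuration, not part of the original source):
# with default settings the function above typically returns something like
#
#   {b'revlogv1', b'store', b'fncache', b'dotencode', b'generaldelta',
#    b'sparserevlog'}
#
# since format.usestore, format.usefncache, format.dotencode and
# format.sparse-revlog default to on.

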
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


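# Hedged sketch of how an extension might claim an extra creation option
# (`myopt` is hypothetical); extensions.wrapfunction is the stock helper for
# wrapping module-level functions:
#
#   def extsetup(ui):
#       def wrapped(orig, ui, createopts):
#           unknown = orig(ui, createopts)
#           unknown.pop(b'myopt', None)
#           return unknown
#
#       extensions.wrapfunction(localrepo, 'filterknowncreateopts', wrapped)

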
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


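# Hedged usage sketch (the path is hypothetical): create a repository on disk
# and then obtain a repo object for it:
#
#   createrepository(ui, b'/tmp/newrepo')
#   repo = instance(ui, b'/tmp/newrepo', create=False)

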
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups raise an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
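
# Hedged usage note (not part of the original source): callers such as the
# unshare logic invoke poisonrepository() on a stale repo object so that any
# lingering reference fails loudly instead of acting on out-of-date state.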