localrepo: handle ValueError during repository opening...
Gregory Szorc
r45469:9e5b4dbe default
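The change itself is small. makelocalrepository() stats the .hg path to distinguish a missing repository from other failures, and previously only handled OSError. Per the new comment, Python 3.8 can raise ValueError instead when the path is invalid (for example, an embedded NUL byte), so the error escaped repository opening as a raw traceback; the new except branch converts it into a clean error.Abort. A minimal sketch of the failure mode, using a made-up invalid path rather than anything from the commit:

import os

try:
    os.stat("repo\x00dir/.hg")  # hypothetical path with an embedded NUL byte
except OSError:
    # The pre-existing handler: a genuinely missing path ends up reported
    # as "repository not found".
    print("repository not found")
except ValueError as e:
    # Previously unhandled during repo opening; the new branch turns this
    # into error.Abort with an "invalid path" message instead of a traceback.
    print("invalid path: %s" % e)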
@@ -1,3789 +1,3794 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
    metadata,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()


class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True


class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper


moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True


@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement allows storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses a persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()


def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
+        except ValueError as e:
+            # Can be raised on Python 3.8 when path is invalid.
+            raise error.Abort(
+                _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
+            )

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )


def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False


def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extensions to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)


def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

    return options


def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(
        b'storage', b'revlog.optimize-delta-parent-choice'
    )
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(
            b'storage', b'revlog.reuse-external-delta-parent'
        )
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(
        ui.config(b'experimental', b'sparse-read.density-threshold')
    )
    srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    sidedata = SIDEDATA_REQUIREMENT in requirements
    options[b'side-data'] = sidedata

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
916 # we allow multiple compression engine requirement to co-exist because
921 # we allow multiple compression engine requirement to co-exist because
917 # strickly speaking, revlog seems to support mixed compression style.
922 # strickly speaking, revlog seems to support mixed compression style.
918 #
923 #
919 # The compression used for new entries will be "the last one"
924 # The compression used for new entries will be "the last one"
920 prefix = r.startswith
925 prefix = r.startswith
921 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
926 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
922 options[b'compengine'] = r.split(b'-', 2)[2]
927 options[b'compengine'] = r.split(b'-', 2)[2]
923
928
924 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
929 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
925 if options[b'zlib.level'] is not None:
930 if options[b'zlib.level'] is not None:
926 if not (0 <= options[b'zlib.level'] <= 9):
931 if not (0 <= options[b'zlib.level'] <= 9):
927 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
932 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
928 raise error.Abort(msg % options[b'zlib.level'])
933 raise error.Abort(msg % options[b'zlib.level'])
929 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
934 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
930 if options[b'zstd.level'] is not None:
935 if options[b'zstd.level'] is not None:
931 if not (0 <= options[b'zstd.level'] <= 22):
936 if not (0 <= options[b'zstd.level'] <= 22):
932 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
937 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
933 raise error.Abort(msg % options[b'zstd.level'])
938 raise error.Abort(msg % options[b'zstd.level'])
934
939
935 if repository.NARROW_REQUIREMENT in requirements:
940 if repository.NARROW_REQUIREMENT in requirements:
936 options[b'enableellipsis'] = True
941 options[b'enableellipsis'] = True
937
942
938 if ui.configbool(b'experimental', b'rust.index'):
943 if ui.configbool(b'experimental', b'rust.index'):
939 options[b'rust.index'] = True
944 options[b'rust.index'] = True
940 if NODEMAP_REQUIREMENT in requirements:
945 if NODEMAP_REQUIREMENT in requirements:
941 options[b'persistent-nodemap'] = True
946 options[b'persistent-nodemap'] = True
942 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
947 if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
943 options[b'persistent-nodemap.mmap'] = True
948 options[b'persistent-nodemap.mmap'] = True
944 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
949 epnm = ui.config(b'storage', b'revlog.nodemap.mode')
945 options[b'persistent-nodemap.mode'] = epnm
950 options[b'persistent-nodemap.mode'] = epnm
946 if ui.configbool(b'devel', b'persistent-nodemap'):
951 if ui.configbool(b'devel', b'persistent-nodemap'):
947 options[b'devel-force-nodemap'] = True
952 options[b'devel-force-nodemap'] = True
948
953
949 return options
954 return options
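
# Illustrative sketch (exposition only, not part of the original module):
# resolving revlog opener options for a hypothetical requirements set. The
# ``ui`` loading below is an assumption about caller context.
#
#   from mercurial import ui as uimod
#
#   u = uimod.ui.load()
#   reqs = {b'revlogv1', b'generaldelta', SPARSEREVLOG_REQUIREMENT}
#   opts = resolverevlogstorevfsoptions(u, reqs, set())
#   assert opts[b'revlogv1']
#   assert opts[b'generaldelta']   # also implied by sparse-revlog
#   assert opts[b'sparse-revlog']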


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
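
# Illustrative sketch (exposition only, not part of the original module) of
# how ``makelocalrepository()`` consumes this table: each factory is called
# to obtain a class, and the classes are combined into a single derived
# type. The ``ui``/``requirements``/``features`` names are hypothetical
# stand-ins for values computed while opening the repository.
#
#   bases = []
#   for iface, factoryfn in REPO_INTERFACES:
#       typ = factoryfn()(ui=ui, requirements=requirements, features=features)
#       bases.append(typ)
#   cls = type('derivedrepo', tuple(bases), {})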


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. The experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository.

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository.

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
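
    # Illustrative sketch (exposition only, not part of the original
    # module): obtaining a view that hides filtered changesets, assuming
    # ``repo`` is an opened localrepository instance.
    #
    #   served = repo.filtered(b'served')
    #   for rev in served:        # iterates only non-filtered revisions
    #       ctx = served[rev]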

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race-free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light"; the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force the `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        #  * if the changelog did not change since (1) -> we can reuse the data
        #  * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in-memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race, see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )
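
    # Illustrative sketch (hypothetical extension code, exposition only):
    # since ``_makedirstate()`` is the documented extension point, an
    # extension could wrap it to decorate the dirstate it returns, e.g.:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _wrapmakedirstate(orig, self):
    #       ds = orig(self)
    #       # inspect or decorate ``ds`` here before returning it
    #       return ds
    #
    #   extensions.wrapfunction(
    #       localrepo.localrepository, '_makedirstate', _wrapmakedirstate
    #   )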

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
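
    # Illustrative sketch (exposition only): intersecting a caller-supplied
    # matcher with the narrowspec, assuming ``repo`` is an opened repository
    # and the pattern below is hypothetical.
    #
    #   m = matchmod.match(repo.root, b'', [b'glob:src/**'])
    #   narrowed = repo.narrowmatch(m)
    #   narrowed(b'src/main.c')  # True only if also inside the narrowspec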

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure the wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
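
    # Illustrative sketch (exposition only) of the changeid forms accepted
    # above, assuming ``repo`` is an opened localrepository instance and the
    # 40-hex string is a hypothetical node:
    #
    #   repo[None]     # workingctx for the working directory
    #   repo[0]        # changectx by integer revision
    #   repo[b'.']     # first parent of the working directory
    #   repo[b'tip']   # the tip changeset
    #   repo[b'1f0dee641bb7258c56bd60e93edfa2405381c41e']  # 40-hex nodeid
    #   repo[0:3]      # a slice yields a list of changectx objects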

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a smartset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
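
    # Illustrative sketch (exposition only): %-formatting keeps revset
    # arguments properly escaped; the revision and username below are
    # hypothetical.
    #
    #   for rev in repo.revs(b'ancestors(%d) and user(%s)', 42, b'alice'):
    #       node = repo.changelog.node(rev)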
1715
1720
1716 def set(self, expr, *args):
1721 def set(self, expr, *args):
1717 '''Find revisions matching a revset and emit changectx instances.
1722 '''Find revisions matching a revset and emit changectx instances.
1718
1723
1719 This is a convenience wrapper around ``revs()`` that iterates the
1724 This is a convenience wrapper around ``revs()`` that iterates the
1720 result and is a generator of changectx instances.
1725 result and is a generator of changectx instances.
1721
1726
1722 Revset aliases from the configuration are not expanded. To expand
1727 Revset aliases from the configuration are not expanded. To expand
1723 user aliases, consider calling ``scmutil.revrange()``.
1728 user aliases, consider calling ``scmutil.revrange()``.
1724 '''
1729 '''
1725 for r in self.revs(expr, *args):
1730 for r in self.revs(expr, *args):
1726 yield self[r]
1731 yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
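
    # Sketch (the alias name and definitions are hypothetical): expanding
    # user revset aliases while overriding one of them locally.
    #
    #   revs = repo.anyrevs(
    #       [b'myalias'], user=True, localalias={b'myalias': b'heads(all())'}
    #   )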

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
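
    # Sketch (the hook name and extra argument are hypothetical): an
    # extension that registered its own hook might fire it like this,
    # aborting the operation on failure because of ``throw=True``.
    #
    #   repo.hook(b'myext-precleanup', throw=True, target=b'default')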

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
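
    # Sketch (the branch name is illustrative): with ``ignoremissing=True``
    # the method falls through and implicitly returns None for a missing
    # branch.
    #
    #   node = repo.branchtip(b'stable', ignoremissing=True)
    #   if node is None:
    #       pass  # the branch does not exist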

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
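
    # Sketch: ``known()`` answers node-membership queries (e.g. during
    # discovery); given binary nodes it returns a parallel list of booleans.
    # ``hexnodes``, a list of 40-character hex strings, is assumed here.
    #
    #   flags = repo.known([bin(h) for h in hexnodes])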

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
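
    # Sketch of a named data filter (the filter name and behavior are made
    # up): an extension registers the filter and hgrc references it from the
    # [encode]/[decode] sections, e.g. ``**.txt = crlf-strip:``. The
    # registered name is matched as a prefix of the configured command, and
    # the remainder is passed to the filter as ``params``.
    #
    #   def crlfstrip(s, params, ui=None, repo=None, filename=None, **kwargs):
    #       # normalize line endings in the file data ``s``
    #       return s.replace(b'\r\n', b'\n')
    #
    #   repo.adddatafilter(b'crlf-strip:', crlfstrip)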

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
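
    # Sketch: code that must only run while a transaction is open can guard
    # itself with ``currenttransaction()``.
    #
    #   tr = repo.currenttransaction()
    #   if tr is None:
    #       raise error.ProgrammingError(b'expected a running transaction')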

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (e.g. phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # once the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
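
    # Usage sketch (illustrative): callers take the store lock first, then
    # open a transaction; the returned transaction works as a context manager
    # that closes on success and aborts on error.
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           pass  # write store data, registering files with ``tr``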

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing the fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
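
    # Sketch: eagerly warming every known cache, roughly what a debug-style
    # maintenance command would do (the lock usage shown is an assumption).
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)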

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()
2563
2568
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

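    # Illustrative sketch (not part of the original module): an extension can
    # use this to defer work until every lock is dropped; the callback
    # receives a boolean success flag:
    #
    #     def notify(successful):
    #         if successful:
    #             repo.ui.status(b'all locks released\n')
    #
    #     repo._afterlock(notify)
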
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

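    # Illustrative sketch (not part of the original module): acquiring both
    # locks in the documented order before opening a transaction:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction(b'example') as tr:
    #             pass  # mutate the store here
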
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a deadlock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a deadlock; they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

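    # Illustrative sketch (not part of the original module): code that must
    # only run under the working-directory lock can assert on it:
    #
    #     if repo.currentwlock() is None:
    #         raise error.ProgrammingError(b'caller must hold wlock')
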
    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergestatemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

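    # Illustrative note (not part of the original module): when copy metadata
    # is written to the filelog above, the entry is conceptually
    #
    #     meta = {b'copy': cfname, b'copyrev': hex(cnode)}
    #
    # with fparent1 forced to nullid, which is the signal to readers that the
    # copy data must be consulted to find the real ancestry.
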
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(
                    status.clean
                )  # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

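    # Illustrative sketch (not part of the original module): a minimal
    # programmatic commit through this API, assuming an open repo whose
    # working directory already has pending changes:
    #
    #     node = repo.commit(
    #         text=b'fix a bug', user=b'alice <alice@example.com>'
    #     )
    #     if node is None:
    #         repo.ui.status(b'nothing changed\n')
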
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                            raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:
                    rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
                    removed = [f for f in removed if not rf(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug(
                        b'not reusing manifest (no file change in '
                        b'changelog, but manifest differs)\n'
                    )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

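    # Illustrative sketch (not part of the original module): commitctx() is
    # also the entry point for in-memory commits built with context.memctx;
    # the file name and contents below are hypothetical:
    #
    #     def filectxfn(repo, mctx, path):
    #         return context.memfilectx(repo, mctx, path, b'new contents\n')
    #
    #     mctx = context.memctx(
    #         repo,
    #         (repo[b'.'].node(), None),
    #         b'commit message',
    #         [b'a.txt'],
    #         filectxfn,
    #         user=b'alice <alice@example.com>',
    #     )
    #     node = repo.commitctx(mctx)
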
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

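    # Illustrative sketch (not part of the original module): comparing the
    # working directory against '.' through this wrapper:
    #
    #     st = repo.status(unknown=True)
    #     for f in st.modified:
    #         repo.ui.write(b'M %s\n' % f)
    #     for f in st.unknown:
    #         repo.ui.write(b'? %s\n' % f)
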
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

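    # Illustrative note (not part of the original module): for each
    # (top, bottom) pair, the loop above walks the first-parent chain from
    # top towards bottom and samples nodes at exponentially growing
    # distances (1, 2, 4, 8, ...). This is what lets the legacy 'between'
    # wire protocol command narrow down a common ancestor in a logarithmic
    # number of round trips.
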
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions called before pushing
        changesets; each hook is passed a pushop carrying repo, remote and
        outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        # Save the message to .hg/last-message.txt so it can be recovered
        # if the commit fails; return a user-displayable path to the file.
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


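# Hedged worked example (illustrative arguments):
#   undoname(b'store/journal')            -> b'store/undo'
#   undoname(b'store/journal.phaseroots') -> b'store/undo.phaseroots'

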
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


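# Hedged usage note: for local paths, hg.repository() and hg.peer() are
# expected to reach this factory via the scheme table, e.g. (illustrative):
#
#   repo = instance(ui, b'/srv/myrepo', create=False)

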
def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


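# Hedged usage sketch: with no explicit options, b'backend' is filled in
# from the storage.new-repo-backend config (b'revlogv1' unless overridden):
#
#   defaultcreateopts(ui)[b'backend'] == b'revlogv1'

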
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    # Pick the first configured engine that is actually available; the
    # for/else aborts only when none of the configured engines is known.
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements


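# Hedged sketch (not part of upstream Mercurial): an extension might wrap
# newreporequirements() to declare its own requirement for new repos. The
# wrapper and the b'exp-example-storage' requirement are hypothetical.
def _examplewrapreporequirements(orig, ui, createopts):
    requirements = orig(ui, createopts)
    # advertise the hypothetical extension's storage format
    requirements.add(b'exp-example-storage')
    return requirements


# Registration would live in the extension's extsetup(), along the lines of
#   extensions.wrapfunction(
#       localrepo, 'newreporequirements', _examplewrapreporequirements
#   )

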
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


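# Hedged sketch (not upstream code): an extension that understands a custom
# creation option would wrap filterknowncreateopts() and drop the options it
# handles from the returned "unknown" dict. b'exampleopt' is hypothetical.
def _examplefiltercreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    # claim b'exampleopt' so repository creation is allowed to proceed
    unknown.pop(b'exampleopt', None)
    return unknown

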
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


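# Hedged usage sketch: the path below is illustrative, and the call aborts
# if the directory already holds a repository.
def _examplecreatelfsrepo(ui):
    # creates an on-disk repo whose requires file includes b'lfs'
    createrepository(ui, b'/tmp/example-repo', createopts={b'lfs': True})

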
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one where
    # all attribute lookups raise an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
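

# Hedged illustration of the effect (hypothetical usage):
#
#   poisonrepository(repo)
#   repo.close()   # still permitted
#   repo.root      # raises error.ProgrammingError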