files: extract code for extra filtering of the `removed` entry into copies...
marmoute - r45467:edd08aa1 default
@@ -1,3831 +1,3789 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from .pycompat import (
    delattr,
    getattr,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    match as matchmod,
    mergestate as mergestatemod,
    mergeutil,
+    metadata,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    rcutil,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

from .interfaces import (
    repository,
    util as interfaceutil,
)

from .utils import (
    hashutil,
    procutil,
    stringutil,
)

from .revlogutils import constants as revlogconst

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

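# For illustration (hypothetical entries): a tuple such as (b'bookmarks',
# b'plain') would point at .hg/bookmarks, while (b'phaseroots', b'') would
# point at .hg/store/phaseroots.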

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)


class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)


class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, b''))

    def join(self, obj, fname):
        return obj.sjoin(fname)


class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""

    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == b'plain':
            return obj.vfs.join(fname)
        else:
            if location != b'':
                raise error.ProgrammingError(
                    b'unexpected location: %s' % location
                )
            return obj.sjoin(fname)


def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

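# A minimal usage sketch (hypothetical property name):
#
#     changelog, cached = isfilecached(repo, b'changelog')
#     if cached:
#         pass  # reuse the already-loaded object without hitting the vfs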

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)


class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())


def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""

    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)

    return wrapper

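# A minimal sketch of how the decorator is used (hypothetical method body):
#
#     class localrepository(object):
#         @unfilteredmethod
#         def destroyed(self):
#             pass  # always sees the unfiltered repo, even via a repoview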

moderncaps = {
    b'lookup',
    b'branchmap',
    b'pushkey',
    b'known',
    b'getbundle',
    b'unbundle',
}
legacycaps = moderncaps.union({b'changegroupsubset'})


@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after sendcommands()'
            )

        if self._closed:
            raise error.ProgrammingError(
                b'callcommand() cannot be used after close()'
            )

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

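# A minimal usage sketch (hypothetical command and arguments):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         e.sendcommands()
#         node = f.result()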

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered(b'served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread(b'clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def getbundle(
        self, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        chunks = exchange.getbundlechunks(
            self._repo,
            source,
            heads=heads,
            common=common,
            bundlecaps=bundlecaps,
            **kwargs
        )[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler(b'01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_(b'cannot perform stream clone against local peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
                if util.safehasattr(ret, b'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(
                _(b'push failed:'), stringutil.forcebytestr(exc)
            )

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.


@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=nodes, missingheads=self._repo.heads()
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(
            self._repo, missingroots=bases, missingheads=heads
        )
        return changegroup.makechangegroup(self._repo, outgoing, b'01', source)

    # End of baselegacywirecommands interface.


# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = b'sparserevlog'

# A repository with the sidedataflag requirement will allow storing extra
# information for revisions without altering their original hashes.
SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'

# A repository with the copies-sidedata-changeset requirement will store
# copies-related information in the changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'

# The repository uses a persistent nodemap for the changelog and the manifest.
NODEMAP_REQUIREMENT = b'persistent-nodemap'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

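# A minimal registration sketch (hypothetical extension module; the
# requirement name is made up):
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-my-custom-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)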

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(
                _(b'.hg/sharedpath points to nonexistent directory %s')
                % sharedvfs.base
            )

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(
        requirements,
        storebasepath,
        lambda base: vfsmod.vfs(base, cacheaudited=True),
    )
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(
            ui=ui,
            intents=intents,
            requirements=requirements,
            features=features,
            wdirvfs=wdirvfs,
            hgvfs=hgvfs,
            store=store,
            storevfs=storevfs,
            storeoptions=storevfs.options,
            cachevfs=cachevfs,
            wcachevfs=wcachevfs,
            extensionmodulenames=extensionmodulenames,
            extrastate=extrastate,
            baseclasses=bases,
        )

        if not isinstance(typ, type):
            raise error.ProgrammingError(
                b'unable to construct type for %s' % iface
            )

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(
        b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
    )

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents,
    )

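# A minimal wrapping sketch (hypothetical extension; ``makemain`` stands in
# for whichever factory entry of REPO_INTERFACES is being customized, and
# ``mymixin`` is made up):
#
#     def wrapmakemain(orig, **kwargs):
#         cls = orig(**kwargs)
#         if 'myext' not in kwargs['extensionmodulenames']:
#             return cls
#         return type('myderivedrepo', (mymixin, cls), {})
#
#     extensions.wrapfunction(localrepo, 'makemain', wrapmakemain)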

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    if not rcutil.use_repo_hgrc():
        return False
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

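# A minimal monkeypatching sketch (hypothetical extension and file name):
#
#     def extloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         try:
#             ui.readconfig(hgvfs.join(b'hgrc-ext'), root=wdirvfs.base)
#             return True
#         except IOError:
#             return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', extloadhgrc)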

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'git': [b'git'],
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source=b'autoload')


def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == b'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported


def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s')
            % b' '.join(sorted(missing)),
            hint=_(
                b'see https://mercurial-scm.org/wiki/MissingRequirement '
                b'for more information'
            ),
        )


def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(
            _(
                b'repository is using sparse feature but '
                b'sparse is not enabled; enable the '
                b'"sparse" extension to access'
            )
        )


def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(
                path, vfstype, b'dotencode' in requirements
            )

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

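# For illustration, the decision above reduces to the following for a
# repository created with modern defaults (requirements including b'store',
# b'fncache' and b'dotencode'):
#
#     makestore({b'store', b'fncache', b'dotencode'}, path, vfstype)
#     # -> storemod.fncachestore(path, vfstype, True)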

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))
    else:  # explicitly mark repo as using revlogv0
        options[b'revlogv0'] = True

    if COPIESSDC_REQUIREMENT in requirements:
        options[b'copies-storage'] = b'changeset-sidedata'
    else:
        writecopiesto = ui.config(b'experimental', b'copies.write-to')
        copiesextramode = (b'changeset-only', b'compatibility')
        if writecopiesto in copiesextramode:
            options[b'copies-storage'] = b'extra'

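    # For illustration: with the exp-copies-sidedata-changeset requirement,
    # copy information lands in changeset sidedata; otherwise a config such
    # as
    #
    #     [experimental]
    #     copies.write-to = changeset-only
    #
    # routes copy metadata to the changeset extras instead.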
843 return options
844 return options
844
845
845
846
846 def resolverevlogstorevfsoptions(ui, requirements, features):
847 def resolverevlogstorevfsoptions(ui, requirements, features):
847 """Resolve opener options specific to revlogs."""
848 """Resolve opener options specific to revlogs."""
848
849
849 options = {}
850 options = {}
850 options[b'flagprocessors'] = {}
851 options[b'flagprocessors'] = {}
851
852
852 if b'revlogv1' in requirements:
853 if b'revlogv1' in requirements:
853 options[b'revlogv1'] = True
854 options[b'revlogv1'] = True
854 if REVLOGV2_REQUIREMENT in requirements:
855 if REVLOGV2_REQUIREMENT in requirements:
855 options[b'revlogv2'] = True
856 options[b'revlogv2'] = True
856
857
857 if b'generaldelta' in requirements:
858 if b'generaldelta' in requirements:
858 options[b'generaldelta'] = True
859 options[b'generaldelta'] = True
859
860
860 # experimental config: format.chunkcachesize
861 # experimental config: format.chunkcachesize
861 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
862 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
862 if chunkcachesize is not None:
863 if chunkcachesize is not None:
863 options[b'chunkcachesize'] = chunkcachesize
864 options[b'chunkcachesize'] = chunkcachesize
864
865
865 deltabothparents = ui.configbool(
866 deltabothparents = ui.configbool(
866 b'storage', b'revlog.optimize-delta-parent-choice'
867 b'storage', b'revlog.optimize-delta-parent-choice'
867 )
868 )
868 options[b'deltabothparents'] = deltabothparents
869 options[b'deltabothparents'] = deltabothparents
869
870
870 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
871 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
871 lazydeltabase = False
872 lazydeltabase = False
872 if lazydelta:
873 if lazydelta:
873 lazydeltabase = ui.configbool(
874 lazydeltabase = ui.configbool(
874 b'storage', b'revlog.reuse-external-delta-parent'
875 b'storage', b'revlog.reuse-external-delta-parent'
875 )
876 )
876 if lazydeltabase is None:
877 if lazydeltabase is None:
877 lazydeltabase = not scmutil.gddeltaconfig(ui)
878 lazydeltabase = not scmutil.gddeltaconfig(ui)
878 options[b'lazydelta'] = lazydelta
879 options[b'lazydelta'] = lazydelta
879 options[b'lazydeltabase'] = lazydeltabase
880 options[b'lazydeltabase'] = lazydeltabase
880
881
881 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
882 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
882 if 0 <= chainspan:
883 if 0 <= chainspan:
883 options[b'maxdeltachainspan'] = chainspan
884 options[b'maxdeltachainspan'] = chainspan
884
885
885 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
886 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
886 if mmapindexthreshold is not None:
887 if mmapindexthreshold is not None:
887 options[b'mmapindexthreshold'] = mmapindexthreshold
888 options[b'mmapindexthreshold'] = mmapindexthreshold
888
889
889 withsparseread = ui.configbool(b'experimental', b'sparse-read')
890 withsparseread = ui.configbool(b'experimental', b'sparse-read')
890 srdensitythres = float(
891 srdensitythres = float(
891 ui.config(b'experimental', b'sparse-read.density-threshold')
892 ui.config(b'experimental', b'sparse-read.density-threshold')
892 )
893 )
893 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
894 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
894 options[b'with-sparse-read'] = withsparseread
895 options[b'with-sparse-read'] = withsparseread
895 options[b'sparse-read-density-threshold'] = srdensitythres
896 options[b'sparse-read-density-threshold'] = srdensitythres
896 options[b'sparse-read-min-gap-size'] = srmingapsize
897 options[b'sparse-read-min-gap-size'] = srmingapsize
897
898
898 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
899 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
899 options[b'sparse-revlog'] = sparserevlog
900 options[b'sparse-revlog'] = sparserevlog
900 if sparserevlog:
901 if sparserevlog:
901 options[b'generaldelta'] = True
902 options[b'generaldelta'] = True
902
903
903 sidedata = SIDEDATA_REQUIREMENT in requirements
904 sidedata = SIDEDATA_REQUIREMENT in requirements
904 options[b'side-data'] = sidedata
905 options[b'side-data'] = sidedata
905
906
906 maxchainlen = None
907 maxchainlen = None
907 if sparserevlog:
908 if sparserevlog:
908 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
909 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
909 # experimental config: format.maxchainlen
910 # experimental config: format.maxchainlen
910 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
911 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
911 if maxchainlen is not None:
912 if maxchainlen is not None:
912 options[b'maxchainlen'] = maxchainlen
913 options[b'maxchainlen'] = maxchainlen
913
914
914 for r in requirements:
915 for r in requirements:
915 # we allow multiple compression engine requirement to co-exist because
916 # we allow multiple compression engine requirement to co-exist because
916 # strickly speaking, revlog seems to support mixed compression style.
917 # strickly speaking, revlog seems to support mixed compression style.
917 #
918 #
918 # The compression used for new entries will be "the last one"
919 # The compression used for new entries will be "the last one"
919 prefix = r.startswith
920 prefix = r.startswith
920 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
921 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
921 options[b'compengine'] = r.split(b'-', 2)[2]
922 options[b'compengine'] = r.split(b'-', 2)[2]
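            # Illustration: a hypothetical b'revlog-compression-zstd'
            # requirement resolves to the b'zstd' engine; splitting on
            # b'-' at most twice keeps everything after the second dash.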

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    if ui.configbool(b'experimental', b'rust.index'):
        options[b'rust.index'] = True
    if NODEMAP_REQUIREMENT in requirements:
        options[b'persistent-nodemap'] = True
        if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
            options[b'persistent-nodemap.mmap'] = True
        epnm = ui.config(b'storage', b'revlog.nodemap.mode')
        options[b'persistent-nodemap.mode'] = epnm
        if ui.configbool(b'devel', b'persistent-nodemap'):
            options[b'devel-force-nodemap'] = True

    return options
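# Illustration (hypothetical input): for a requirements set containing
# b'revlogv1', b'generaldelta' and the sparse-revlog requirement, the
# resolved mapping would include, among other entries:
#
#   {b'revlogv1': True, b'generaldelta': True, b'sparse-revlog': True}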


def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)


@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)


def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage
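# Illustration: ``makefilestorage()`` returns a class, not an instance,
# so that it can be mixed into the final repository type. A repository
# with the narrow requirement gets ``revlognarrowfilestorage``; all
# others get ``revlogfilestorage``.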


# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
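# Illustration (hypothetical extension code): since the factories are
# resolved lazily through lambdas, an extension can wrap them after this
# module has been imported, e.g.:
#
#   def makemainwrapper(orig, **kwargs):
#       return orig(**kwargs)  # customize the returned type here
#
#   extensions.wrapfunction(localrepo, 'makemain', makemainwrapper)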


@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        b'revlogv1',
        b'generaldelta',
        b'treemanifest',
        COPIESSDC_REQUIREMENT,
        REVLOGV2_REQUIREMENT,
        SIDEDATA_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        NODEMAP_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        b'store',
        b'fncache',
        b'shared',
        b'relshared',
        b'dotencode',
        b'exp-sparse',
        b'internal-phase',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        b'hgrc',
        b'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        b'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        b'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        b'bisect.state',
    }

    def __init__(
        self,
        baseui,
        ui,
        origroot,
        wdirvfs,
        hgvfs,
        requirements,
        supportedrequirements,
        sharedpath,
        store,
        cachevfs,
        wcachevfs,
        features,
        intents=None,
    ):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if util.safehasattr(self.svfs, b'vfs'):  # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else:  # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

        self.filecopiesmode = None
        if COPIESSDC_REQUIREMENT in self.requirements:
            self.filecopiesmode = b'changeset-sidedata'

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)

        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (
                repo is None
                or not util.safehasattr(repo, b'_wlockref')
                or not util.safehasattr(repo, b'_lockref')
            ):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1 :]
            if path.startswith(b'cache/'):
                msg = b'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
            if path.startswith(b'journal.') or path.startswith(b'undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn(
                        b'write with no lock: "%s"' % path,
                        stacklevel=3,
                        config=b'check-locks',
                    )
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn(
                    b'write with no wlock: "%s"' % path,
                    stacklevel=3,
                    config=b'check-locks',
                )
            return ret

        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)

        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, b'_lockref'):
                return
            if mode in (None, b'r', b'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1 :]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn(
                    b'write with no lock: "%s"' % path, stacklevel=4
                )
            return ret

        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool(b'experimental', b'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(
                bundle2.getrepocaps(self, role=b'client')
            )
            caps.add(b'bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True
        )

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1 :]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = b'/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1 :])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self)  # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and b'%' not in name:
            name = name + b'%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
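    # Illustration: typical view names are the ``repoview`` filter names,
    # e.g. (hypothetical call sites):
    #
    #   repo.filtered(b'visible')  # hide obsolete/hidden changesets
    #   repo.filtered(b'served')   # what is advertised to peers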

    @mixedrepostorecache(
        (b'bookmarks', b'plain'),
        (b'bookmarks.current', b'plain'),
        (b'bookmarks', b''),
        (b'00changelog.i', b''),
    )
    def _bookmarks(self):
        # Since the multiple files involved in the transaction cannot be
        # written atomically (with current repository format), there is a race
        # condition here.
        #
        # 1) changelog content A is read
        # 2) outside transaction update changelog to content B
        # 3) outside transaction update bookmark file referring to content B
        # 4) bookmarks file content is read and filtered against changelog-A
        #
        # When this happens, bookmarks against nodes missing from A are dropped.
        #
        # Having this happen during read is not great, but it becomes worse
        # when it happens during write, because the bookmarks to the "unknown"
        # nodes will be dropped for good. However, writes happen within locks.
        # This locking makes it possible to have a race free consistent read.
        # For this purpose, data read from disk before locking is
        # "invalidated" right after the locks are taken. These invalidations
        # are "light": the `filecache` mechanism keeps the data in memory and
        # will reuse it if the underlying files did not change. Not parsing
        # the same data multiple times helps performance.
        #
        # Unfortunately, in the case described above, the files tracked by the
        # bookmarks file cache might not have changed, but the in-memory
        # content is still "wrong" because we used an older changelog content
        # to process the on-disk data. So after locking, the changelog would be
        # refreshed but `_bookmarks` would be preserved.
        # Adding `00changelog.i` to the list of tracked files is not
        # enough, because at the time we build the content for `_bookmarks` in
        # (4), the changelog file has already diverged from the content used
        # for loading `changelog` in (1)
        #
        # To prevent the issue, we force the changelog to be explicitly
        # reloaded while computing `_bookmarks`. The data race can still happen
        # without the lock (with a narrower window), but it would no longer go
        # undetected during the lock time refresh.
        #
        # The new schedule is as follows:
        #
        # 1) filecache logic detects that `_bookmarks` needs to be computed
        # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
        # 3) We force `changelog` filecache to be tested
        # 4) cachestat for `changelog` are captured (for changelog)
        # 5) `_bookmarks` is computed and cached
        #
        # The step in (3) ensures we have a changelog at least as recent as the
        # cache stat computed in (1). As a result, at locking time:
        # * if the changelog did not change since (1) -> we can reuse the data
        # * otherwise -> the bookmarks get refreshed.
        self._refreshchangelog()
        return bookmarks.bmstore(self)

    def _refreshchangelog(self):
        """make sure the in memory changelog matches the on-disk one"""
        if 'changelog' in vars(self) and self.currenttransaction() is None:
            del self.changelog

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache(b'phaseroots', b'00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache(b'obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache(b'00changelog.i')
    def changelog(self):
        # load dirstate before changelog to avoid race see issue6303
        self.dirstate.prefetch_parents()
        return self.store.changelog(txnutil.mayhavepending(self.root))

    @storecache(b'00manifest.i')
    def manifestlog(self):
        return self.store.manifestlog(self, self._storenarrowmatch)

    @repofilecache(b'dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(
            self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
        )

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(
                    _(b"warning: ignoring unknown working parent %s!\n")
                    % short(node)
                )
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned about later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
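    # Illustration (hypothetical narrowspec): with an include of
    # b'path:src' only, ``repo.narrowmatch()`` matches b'src/a.c' but not
    # b'docs/readme'; a ``match`` argument only narrows this further,
    # since the two matchers are intersected.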

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    @unfilteredpropertycache
    def _quick_access_changeid_null(self):
        return {
            b'null': (nullrev, nullid),
            nullrev: (nullrev, nullid),
            nullid: (nullrev, nullid),
        }

    @unfilteredpropertycache
    def _quick_access_changeid_wc(self):
        # also fast path access to the working copy parents
        # however, only do it for filters that ensure wc is visible.
        quick = {}
        cl = self.unfiltered().changelog
        for node in self.dirstate.parents():
            if node == nullid:
                continue
            rev = cl.index.get_rev(node)
            if rev is None:
                # unknown working copy parent case:
                #
                # skip the fast path and let higher code deal with it
                continue
            pair = (rev, node)
            quick[rev] = pair
            quick[node] = pair
            # also add the parents of the parents
            for r in cl.parentrevs(rev):
                if r == nullrev:
                    continue
                n = cl.node(r)
                pair = (r, n)
                quick[r] = pair
                quick[n] = pair
        p1node = self.dirstate.p1()
        if p1node != nullid:
            quick[b'.'] = quick[p1node]
        return quick

    @unfilteredmethod
    def _quick_access_changeid_invalidate(self):
        if '_quick_access_changeid_wc' in vars(self):
            del self.__dict__['_quick_access_changeid_wc']

    @property
    def _quick_access_changeid(self):
        """a helper dictionary for __getitem__ calls

        This contains a list of symbols we can recognise right away without
        further processing.
        """
        mapping = self._quick_access_changeid_null
        if self.filtername in repoview.filter_has_wc:
            mapping = mapping.copy()
            mapping.update(self._quick_access_changeid_wc)
        return mapping

    def __getitem__(self, changeid):
        # dealing with special cases
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid

        # dealing with multiple revisions
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [
                self[i]
                for i in pycompat.xrange(*changeid.indices(len(self)))
                if i not in self.changelog.filteredrevs
            ]

        # dealing with some special values
        quick_access = self._quick_access_changeid.get(changeid)
        if quick_access is not None:
            rev, node = quick_access
            return context.changectx(self, rev, node, maybe_filtered=False)
        if changeid == b'tip':
            node = self.changelog.tip()
            rev = self.changelog.rev(node)
            return context.changectx(self, rev, node)

        # dealing with arbitrary values
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == b'.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid)  # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (
                        self.local()
                        and changeid in self.unfiltered().dirstate.parents()
                    ):
                        msg = _(b"working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid)  # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    b"unsupported changeid '%s' of type %s"
                    % (changeid, pycompat.bytestr(type(changeid)))
                )

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(
                _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
            )
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
            )
        except error.WdirUnsupported:
            return context.workingctx(self)
1674 def __contains__(self, changeid):
1675 def __contains__(self, changeid):
1675 """True if the given changeid exists
1676 """True if the given changeid exists
1676
1677
1677 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1678 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1678 specified.
1679 specified.
1679 """
1680 """
1680 try:
1681 try:
1681 self[changeid]
1682 self[changeid]
1682 return True
1683 return True
1683 except error.RepoLookupError:
1684 except error.RepoLookupError:
1684 return False
1685 return False
1685
1686
1686 def __nonzero__(self):
1687 def __nonzero__(self):
1687 return True
1688 return True
1688
1689
1689 __bool__ = __nonzero__
1690 __bool__ = __nonzero__
1690
1691
1691 def __len__(self):
1692 def __len__(self):
1692 # no need to pay the cost of repoview.changelog
1693 # no need to pay the cost of repoview.changelog
1693 unfi = self.unfiltered()
1694 unfi = self.unfiltered()
1694 return len(unfi.changelog)
1695 return len(unfi.changelog)
1695
1696
1696 def __iter__(self):
1697 def __iter__(self):
1697 return iter(self.changelog)
1698 return iter(self.changelog)
1698
1699
1699 def revs(self, expr, *args):
1700 def revs(self, expr, *args):
1700 '''Find revisions matching a revset.
1701 '''Find revisions matching a revset.
1701
1702
1702 The revset is specified as a string ``expr`` that may contain
1703 The revset is specified as a string ``expr`` that may contain
1703 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1704 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1704
1705
1705 Revset aliases from the configuration are not expanded. To expand
1706 Revset aliases from the configuration are not expanded. To expand
1706 user aliases, consider calling ``scmutil.revrange()`` or
1707 user aliases, consider calling ``scmutil.revrange()`` or
1707 ``repo.anyrevs([expr], user=True)``.
1708 ``repo.anyrevs([expr], user=True)``.
1708
1709
1709 Returns a smartset.abstractsmartset, which is a list-like interface
1710 Returns a smartset.abstractsmartset, which is a list-like interface
1710 that contains integer revisions.
1711 that contains integer revisions.
1711 '''
1712 '''
1712 tree = revsetlang.spectree(expr, *args)
1713 tree = revsetlang.spectree(expr, *args)
1713 return revset.makematcher(tree)(self)
1714 return revset.makematcher(tree)(self)
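    # Illustration (hypothetical queries): the %-formatting escapes user
    # input for the revset parser, e.g.:
    #
    #   repo.revs(b'ancestors(%d)', 42)
    #   repo.revs(b'branch(%s) and not obsolete()', b'default')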
1714
1715
1715 def set(self, expr, *args):
1716 def set(self, expr, *args):
1716 '''Find revisions matching a revset and emit changectx instances.
1717 '''Find revisions matching a revset and emit changectx instances.
1717
1718
1718 This is a convenience wrapper around ``revs()`` that iterates the
1719 This is a convenience wrapper around ``revs()`` that iterates the
1719 result and is a generator of changectx instances.
1720 result and is a generator of changectx instances.
1720
1721
1721 Revset aliases from the configuration are not expanded. To expand
1722 Revset aliases from the configuration are not expanded. To expand
1722 user aliases, consider calling ``scmutil.revrange()``.
1723 user aliases, consider calling ``scmutil.revrange()``.
1723 '''
1724 '''
1724 for r in self.revs(expr, *args):
1725 for r in self.revs(expr, *args):
1725 yield self[r]
1726 yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if specs == [b'null']:
            return revset.baseset([nullrev])
        if specs == [b'.']:
            quick_data = self._quick_access_changeid.get(b'.')
            if quick_data is not None:
                return revset.baseset([quick_data[0]])
        if user:
            m = revset.matchany(
                self.ui,
                specs,
                lookup=revset.lookupfn(self),
                localalias=localalias,
            )
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
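
    # Editor's note: a hedged sketch of ``anyrevs()`` (not original code);
    # the spec values and the ``mybranch`` alias are illustrative only:
    #
    #   revs = repo.anyrevs(
    #       [b'draft()', b'mybranch'],
    #       user=True,
    #       localalias={b'mybranch': b'branch(default)'},
    #   )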

    def url(self):
        return b'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
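
    # Editor's note: illustrative only; an extension that has registered a
    # custom hook might fire it through this helper (hypothetical hook name
    # and keyword argument, not an upstream hook):
    #
    #   repo.hook(b'myext-post-sync', throw=False, source=b'sync')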

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in pycompat.iteritems(tags):
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = {tag: b'global' for tag in alltags}

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in pycompat.iteritems(alltags):
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags[b'tip'] = self.changelog.tip()
        tagtypes = {
            encoding.tolocal(name): value
            for (name, value) in pycompat.iteritems(tagtypes)
        }
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in pycompat.iteritems(self.tags()):
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in pycompat.iteritems(self._tagscache.tags):
                nodetagscache.setdefault(n, []).append(t)
            for tags in pycompat.itervalues(nodetagscache):
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
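
    # Editor's note: a sketch of the tag-lookup API above (illustrative
    # values, not original code):
    #
    #   node = repo.tags().get(b'v1.0')   # tag name -> node, or None
    #   kind = repo.tagtype(b'v1.0')      # b'global', b'local', or None
    #   names = repo.nodetags(node) if node else []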

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
            else:
                pass
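
    # Editor's note: with ``ignoremissing=True`` a missing branch yields None
    # instead of raising RepoLookupError (illustrative sketch, not original
    # code):
    #
    #   tip = repo.branchtip(b'stable', ignoremissing=True)
    #   if tip is None:
    #       pass  # no such branch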

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        get_rev = cl.index.get_rev
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = get_rev(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
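
    # Editor's note: ``known()`` backs the wire-protocol "known" query; it
    # maps candidate nodes to booleans. A sketch with assumed example nodes:
    #
    #   repo.known([node_a, node_b])
    #   # -> e.g. [True, False] when node_b is unknown or filtered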

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool(b'phases', b'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered(b'visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return b'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        self[None].setparents(p1, p2)
        self._quick_access_changeid_invalidate()

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(
            self, path, changeid, fileid, changectx=changectx
        )

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == b'!':
                    continue
                mf = matchmod.match(self.root, b'', [pat])
                fn = None
                params = cmd
                for name, filterfn in pycompat.iteritems(self._datafilters):
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name) :].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                    fn.__name__ = 'commandfilter'
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
                    fn.__name__ = 'compat-' + oldfn.__name__
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
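
    # Editor's note: ``_loadfilter`` reads pattern/command pairs from config
    # sections such as [encode] and [decode]. A minimal hgrc sketch, adapted
    # from the hgrc documentation (example values, not shipped defaults):
    #
    #   [encode]
    #   **.gz = pipe: gunzip
    #
    #   [decode]
    #   **.gz = pipe: gzip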

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug(
                    b"filtering %s through %s\n"
                    % (filename, cmd or pycompat.sysbytes(fn.__name__))
                )
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter(b'encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter(b'decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if b'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(
                filename, data, backgroundclose=backgroundclose, **kwargs
            )
            if b'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
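
    # Editor's note: an illustrative calling pattern, not original code; the
    # store lock must be held before a transaction is opened, and the
    # transaction object works as a context manager:
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # mutate the store; rolled back on exception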

    def transaction(self, desc, report=None):
        if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
            b'devel', b'check-locks'
        ):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError(b'transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists(b"journal"):
            raise error.RepoError(
                _(b"abandoned transaction found"),
                hint=_(b"run 'hg recover' to clean up transaction"),
            )

        idbase = b"%.40f#%f" % (random.random(), time.time())
        ha = hex(hashutil.sha1(idbase).digest())
        txnid = b'TXN:' + ha
        self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {b'plain': self.vfs, b'store': self.svfs}  # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
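        #
        # Editor's note: a hypothetical hgrc snippet consuming this record
        # (illustration only, not shipped configuration; shell hooks run
        # from the repository root):
        #
        #   [hooks]
        #   txnclose.tag-moves = test -z "$HG_TAG_MOVED" || cat .hg/changes/tags.changes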
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool(
            b'experimental', b'hook-track-tags'
        )
        if desc != b'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()

            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs[b'tag_moved'] = b'1'
                    with repo.vfs(
                        b'changes/tags.changes', b'w', atomictemp=True
                    ) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist once the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)

        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()

            singleheadopt = (b'experimental', b'single-head-per-branch')
            singlehead = repo.ui.configbool(*singleheadopt)
            if singlehead:
                singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
                accountclosed = singleheadsub.get(
                    b"account-closed-heads", False
                )
                scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
            if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
                for name, (old, new) in sorted(
                    tr.changes[b'bookmarks'].items()
                ):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook(
                        b'pretxnclose-bookmark',
                        throw=True,
                        **pycompat.strkwargs(args)
                    )
            if hook.hashook(repo.ui, b'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for revs, (old, new) in tr.changes[b'phases']:
                    for rev in revs:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook(
                            b'pretxnclose-phase',
                            throw=True,
                            **pycompat.strkwargs(args)
                        )

            repo.hook(
                b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
            )

        def releasefn(tr, success):
            repo = reporef()
            if repo is None:
                # If the repo has been GC'd (and this release function is being
                # called from transaction.__del__), there's not much we can do,
                # so just leave the unfinished transaction there and let the
                # user run `hg recover`.
                return
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, b'journal.narrowspec')
                narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, b'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(
            rp,
            self.svfs,
            vfsmap,
            b"journal",
            b"undo",
            aftertrans(renames),
            self.store.createmode,
            validator=validate,
            releasefn=releasefn,
            checkambigfiles=_cachedfiles,
            name=desc,
        )
        tr.changes[b'origrepolen'] = len(self)
        tr.changes[b'obsmarkers'] = set()
        tr.changes[b'phases'] = []
        tr.changes[b'bookmarks'] = {}

        tr.hookargs[b'txnid'] = txnid
        tr.hookargs[b'txnname'] = desc
        tr.hookargs[b'changes'] = tr.changes
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize(b'flush-fncache', self.store.write)

        def txnclosehook(tr2):
            """To be run if the transaction is successful; schedules a hook
            run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc(unused_success):
                repo = reporef()
                if hook.hashook(repo.ui, b'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes[b'bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook(
                            b'txnclose-bookmark',
                            throw=False,
                            **pycompat.strkwargs(args)
                        )

                if hook.hashook(repo.ui, b'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(
                        tr.changes[b'phases'], key=lambda r: r[0][0]
                    )
                    for revs, (old, new) in phasemv:
                        for rev in revs:
                            args = tr.hookargs.copy()
                            node = hex(cl.node(rev))
                            args.update(phases.preparehookargs(node, old, new))
                            repo.hook(
                                b'txnclose-phase',
                                throw=False,
                                **pycompat.strkwargs(args)
                            )

                repo.hook(
                    b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
                )

            reporef()._afterlock(hookfunc)

        tr.addfinalize(b'txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))

        def txnaborthook(tr2):
            """To be run if the transaction is aborted"""
            reporef().hook(
                b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
            )

        tr.addabort(b'txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return (
            (self.svfs, b'journal'),
            (self.svfs, b'journal.narrowspec'),
            (self.vfs, b'journal.narrowspec.dirstate'),
            (self.vfs, b'journal.dirstate'),
            (self.vfs, b'journal.branch'),
            (self.vfs, b'journal.desc'),
            (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
            (self.svfs, b'journal.phaseroots'),
        )

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, b'journal.dirstate')
        narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, b'journal.narrowspec')
        self.vfs.write(
            b"journal.branch", encoding.fromlocal(self.dirstate.branch())
        )
        self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write(
            b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
        )
        self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
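
    # Editor's note: on successful transaction close, ``aftertrans(renames)``
    # moves each journal file to its undo counterpart (``undoname`` swaps the
    # 'journal' prefix for 'undo'). A sketch of the resulting layout:
    #
    #   .hg/store/journal            -> .hg/store/undo
    #   .hg/journal.dirstate         -> .hg/undo.dirstate
    #   .hg/store/journal.phaseroots -> .hg/store/undo.phaseroots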

    def recover(self):
        with self.lock():
            if self.svfs.exists(b"journal"):
                self.ui.status(_(b"rolling back interrupted transaction\n"))
                vfsmap = {
                    b'': self.svfs,
                    b'plain': self.vfs,
                }
                transaction.rollback(
                    self.svfs,
                    vfsmap,
                    b"journal",
                    self.ui.warn,
                    checkambigfiles=_cachedfiles,
                )
                self.invalidate()
                return True
            else:
                self.ui.warn(_(b"no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists(b"undo"):
                dsguard = dirstateguard.dirstateguard(self, b'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_(b"no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod  # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read(b'undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = _(
                    b'repository tip rolled back to revision %d'
                    b' (undo %s: %s)\n'
                ) % (oldtip, desc, detail)
            else:
                msg = _(
                    b'repository tip rolled back to revision %d (undo %s)\n'
                ) % (oldtip, desc)
        except IOError:
            msg = _(b'rolling back unknown transaction\n')
            desc = None

        if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
            raise error.Abort(
                _(
                    b'rollback of last commit while not checked out '
                    b'may lose data'
                ),
                hint=_(b'use -f to force'),
            )

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {b'plain': self.vfs, b'': self.svfs}
        transaction.rollback(
            self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
        )
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists(b'undo.bookmarks'):
            bookmarksvfs.rename(
                b'undo.bookmarks', b'bookmarks', checkambig=True
            )
        if self.svfs.exists(b'undo.phaseroots'):
            self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
        self.invalidate()

        has_node = self.changelog.index.has_node
        parentgone = any(not has_node(p) for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored one
            dsguard.close()

            narrowspec.restorebackup(self, b'undo.narrowspec')
            narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, b'undo.dirstate')
            try:
                branch = self.vfs.read(b'undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(
                    _(
                        b'named branch could not be reset: '
                        b'current branch is still \'%s\'\n'
                    )
                    % self.dirstate.branch()
                )

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(
                    _(
                        b'working directory now based on '
                        b'revisions %d and %d\n'
                    )
                    % parents
                )
            else:
                ui.status(
                    _(b'working directory now based on revision %d\n') % parents
                )
            mergestatemod.mergestate.clean(self, self[b'.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)

        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)

        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get(b'source') == b'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes[b'origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug(b'updating the branch cache\n')
            self.filtered(b'served').branchmap()
            self.filtered(b'served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()

            self.changelog.update_caches(transaction=tr)
            self.manifestlog.update_caches(transaction=tr)

            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self[b'.'].parents():
                ctx.manifest()  # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered(b'served').tags()

            # The `full` arg is documented as updating even the lazily-loaded
            # caches immediately, so we're forcing a write to cause these
            # caches to be warmed up even if they haven't explicitly been
            # requested yet (if they've never been used by hg, they won't ever
            # have been written, even if they're a subset of another kind of
            # cache that *has* been used).
            for filt in repoview.filtertable.keys():
                filtered = self.filtered(filt)
                filtered.branchmap().write(filtered)
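
    # Editor's note: the full warm-up can be requested explicitly; for
    # instance, ``hg debugupdatecaches`` calls this method with ``full=True``
    # while holding the locks (sketch, assuming a ``repo`` object):
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)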

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        self._quick_access_changeid_invalidate()

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
2579
2580
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered()  # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == b'dirstate':
                continue
            if (
                k == b'changelog'
                and self.currenttransaction()
                and self.changelog._delayed
            ):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

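A sketch of when invalidation is typically wanted (reusing `repo` from the first sketch): after another process may have written to the repository, drop the cached state so the next read hits disk.

    repo.invalidateall()                   # store, non-store and dirstate
    tip = repo[b'tip']                     # re-reads the changelog
    repo.invalidate(clearfilecache=True)   # also drops filecache entries
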
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(
        self,
        vfs,
        lockname,
        wait,
        releasefn,
        acquirefn,
        desc,
        inheritchecker=None,
        parentenvvar=None,
    ):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint(b"ui", b"timeout")
            warntimeout = self.ui.configint(b"ui", b"timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')

        l = lockmod.trylock(
            self.ui,
            vfs,
            lockname,
            timeout,
            warntimeout,
            releasefn=releasefn,
            acquirefn=acquirefn,
            desc=desc,
            inheritchecker=inheritchecker,
            parentlock=parentlock,
            signalsafe=signalsafe,
        )
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else:  # no lock has been found.
            callback(True)

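A sketch of the _afterlock() contract (reusing `repo` from the first sketch): the callback takes a single success flag and runs once the outermost held lock is released.

    def report(success):
        repo.ui.status(b'all locks released\n')

    with repo.lock():
        repo._afterlock(report)   # queued on the held lock
    # report(True) has run by this point
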
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(
            vfs=self.svfs,
            lockname=b"lock",
            wait=wait,
            releasefn=None,
            acquirefn=self.invalidate,
            desc=_(b'repository %s') % self.origroot,
        )
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                b'wlock cannot be inherited in the middle of a transaction'
            )

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisitions would not cause a dead-lock as they would just fail.
        if wait and (
            self.ui.configbool(b'devel', b'all-warnings')
            or self.ui.configbool(b'devel', b'check-locks')
        ):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn(b'"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache[b'dirstate'].refresh()

        l = self._lock(
            self.vfs,
            b"wlock",
            wait,
            unlock,
            self.invalidatedirstate,
            _(b'working directory of %s') % self.origroot,
            inheritchecker=self._wlockchecktransaction,
            parentenvvar=b'HG_WLOCK_LOCKER',
        )
        self._wlockref = weakref.ref(l)
        return l

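A sketch of the documented acquisition order (reusing `repo` from the first sketch): take 'wlock' strictly before 'lock'; the reverse order trips the devel warning above and risks deadlock against other processes.

    with repo.wlock(), repo.lock():
        with repo.transaction(b'sketch') as tr:
            pass  # store mutations belong here
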
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(
        self,
        fctx,
        manifest1,
        manifest2,
        linkrev,
        tr,
        changelist,
        includecopymeta,
    ):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug(b'reusing %s filelog entry\n' % fname)
                if (
                    fparent1 != nullid
                    and manifest1.flags(fname) != fctx.flags()
                ) or (
                    fparent2 != nullid
                    and manifest2.flags(fname) != fctx.flags()
                ):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2:  # branch merge
                if fparent2 == nullid or cnode is None:  # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g., issue4476). Instead, we
            # will warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if cnode:
                self.ui.debug(
                    b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
                )
                if includecopymeta:
                    meta[b"copy"] = cfname
                    meta[b"copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(
                    _(
                        b"warning: can't find ancestor for '%s' "
                        b"copied from '%s'!\n"
                    )
                    % (fname, cfname)
                )

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid
            elif not fparentancestors:
                # TODO: this whole if-else might be simplified much more
                ms = mergestatemod.mergestate.read(self)
                if (
                    fname in ms
                    and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
                ):
                    fparent1, fparent2 = fparent2, nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

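A sketch of how the copy metadata written by _filecommit() resurfaces later (reusing `repo` from the first sketch; it assumes the tip changeset recorded a rename of foo to bar):

    fctx = repo[b'tip'][b'bar']
    print(fctx.renamed())      # e.g. (b'foo', <node of the copy source>)
    print(fctx.copysource())   # b'foo'
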
    def checkcommitpatterns(self, wctx, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == b'.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _(b'file not found!'))
                # Is it a directory that exists or used to exist?
                if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
                    d = f + b'/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _(b"no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _(b"file not tracked!"))

    @unfilteredmethod
    def commit(
        self,
        text=b"",
        user=None,
        date=None,
        match=None,
        force=False,
        editor=None,
        extra=None,
    ):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(
                    _(
                        b'cannot partially commit a merge '
                        b'(do not specify files or patterns)'
                    )
                )

            status = self.status(match=match, clean=force)
            if force:
                # mq may commit clean files
                status.modified.extend(status.clean)

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force
            )

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, match, status, fail)

            cctx = context.workingcommitctx(
                self, status, text, user, date, extra
            )

            ms = mergestatemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            # internal config: ui.allowemptycommit
            allowemptycommit = (
                wctx.branch() != wctx.p1().branch()
                or extra.get(b'close')
                or merge
                or cctx.files()
                or self.ui.configbool(b'ui', b'allowemptycommit')
            )
            if not allowemptycommit:
                self.ui.debug(b'nothing to commit, clearing merge state\n')
                ms.reset()
                return None

            if merge and cctx.deleted():
                raise error.Abort(_(b"cannot commit merge with missing files"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = text != cctx._text

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(
                        _(b'committing subrepository %s\n')
                        % uipathfn(subrepoutil.subrelpath(sub))
                    )
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
            try:
                self.hook(
                    b"precommit", throw=True, parent1=hookp1, parent2=hookp2
                )
                with self.transaction(b'commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except:  # re-raises
                if edited:
                    self.ui.write(
                        _(b'note: commit message saved in %s\n') % msgfn
                    )
                    self.ui.write(
                        _(
                            b"note: use 'hg commit --logfile "
                            b".hg/last-message.txt --edit' to reuse it\n"
                        )
                    )
                raise

        def commithook(unused_success):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook(
                    b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
                )

        self._afterlock(commithook)
        return ret

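A sketch of a programmatic commit through the method above (reusing `repo` from the first sketch; it assumes the working directory has pending changes):

    from mercurial.node import short

    node = repo.commit(
        text=b'example: scripted commit',
        user=b'Example <example@example.org>',
    )
    if node is None:
        repo.ui.status(b'nothing to commit\n')
    else:
        repo.ui.status(b'committed %s\n' % short(node))
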
    @unfilteredmethod
    def commitctx(self, ctx, error=False, origctx=None):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        origctx is for convert to work around the problem that bug
        fixes to the files list in changesets change hashes. For
        convert to be the identity, it can pass an origctx and this
        function will use the same files list when it makes sense to
        do so.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        if self.filecopiesmode == b'changeset-sidedata':
            writechangesetcopy = True
            writefilecopymeta = True
            writecopiesto = None
        else:
            writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
            writefilecopymeta = writecopiesto != b'changeset-only'
            writechangesetcopy = writecopiesto in (
                b'changeset-only',
                b'compatibility',
            )
        p1copies, p2copies = None, None
        if writechangesetcopy:
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        filesadded, filesremoved = None, None
        with self.lock(), self.transaction(b"commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug(b'reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
                if writechangesetcopy:
                    filesadded = ctx.filesadded()
                    filesremoved = ctx.filesremoved()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_(b"committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + b"\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(
                                fctx,
                                m1,
                                m2,
                                linkrev,
                                trp,
                                changed,
                                writefilecopymeta,
                            )
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(
                            _(b"trouble committing %s!\n") % uipathfn(f)
                        )
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(
                                _(b"trouble committing %s!\n") % uipathfn(f)
                            )
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                if p2.rev() != nullrev:
                    # (this changeset: the inline filtering below moves into
                    # the new metadata module; removed lines are marked '-',
                    # their replacement '+')
-
-                @util.cachefunc
-                def mas():
-                    p1n = p1.node()
-                    p2n = p2.node()
-                    cahs = self.changelog.commonancestorsheads(p1n, p2n)
-                    if not cahs:
-                        cahs = [nullrev]
-                    return [self[r].manifest() for r in cahs]
-
-                def deletionfromparent(f):
-                    # When a file is removed relative to p1 in a merge, this
-                    # function determines whether the absence is due to a
-                    # deletion from a parent, or whether the merge commit
-                    # itself deletes the file. We decide this by doing a
-                    # simplified three way merge of the manifest entry for
-                    # the file. There are two ways we decide the merge
-                    # itself didn't delete a file:
-                    # - neither parent (nor the merge) contains the file
-                    # - exactly one parent contains the file, and that
-                    #   parent has the same filelog entry as the merge
-                    #   ancestor (or all of them if there are two). In
-                    #   other words, that parent left the file unchanged
-                    #   while the other one deleted it.
-                    # One way to think about this is that deleting a file is
-                    # similar to emptying it, so the list of changed files
-                    # should be similar either way. The computation
-                    # described above is not done directly in _filecommit
-                    # when creating the list of changed files, however
-                    # it does something very similar by comparing filelog
-                    # nodes.
-                    if f in m1:
-                        return f not in m2 and all(
-                            f in ma and ma.find(f) == m1.find(f)
-                            for ma in mas()
-                        )
-                    elif f in m2:
-                        return all(
-                            f in ma and ma.find(f) == m2.find(f)
-                            for ma in mas()
-                        )
-                    else:
-                        return True
-
-                removed = [f for f in removed if not deletionfromparent(f)]
+                rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
+                removed = [f for f in removed if not rf(f)]

                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug(
                        b'not reusing manifest (no file change in '
                        b'changelog, but manifest differs)\n'
                    )
                if files or md:
                    self.ui.note(_(b"committing manifest\n"))
                    # we're using narrowmatch here since it's already applied
                    # at other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(
                        trp,
                        linkrev,
                        p1.manifestnode(),
                        p2.manifestnode(),
                        added,
                        drop,
                        match=self.narrowmatch(),
                    )

                    if writechangesetcopy:
                        filesadded = [
                            f for f in changed if not (f in m1 or f in m2)
                        ]
                        filesremoved = removed
                else:
                    self.ui.debug(
                        b'reusing manifest from p1 (listed files '
                        b'actually unchanged)\n'
                    )
                    mn = p1.manifestnode()
            else:
                self.ui.debug(b'reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == b'changeset-only':
                # If writing only to changeset extras, use None to indicate
                # that no entry should be written. If writing to both, write
                # an empty entry to prevent the reader from falling back to
                # reading filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None
                filesadded = filesadded or None
                filesremoved = filesremoved or None

            if origctx and origctx.manifestnode() == mn:
                files = origctx.files()

            # update changelog
            self.ui.note(_(b"committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(
                mn,
                files,
                ctx.description(),
                trp,
                p1.node(),
                p2.node(),
                user,
                ctx.date(),
                ctx.extra().copy(),
                p1copies,
                p2copies,
                filesadded,
                filesremoved,
            )
            xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
            self.hook(
                b'pretxncommit',
                throw=True,
                node=hex(n),
                parent1=xp1,
                parent2=xp2,
            )
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary does not alter the parent changeset.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

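The filtering extracted above into metadata.get_removal_filter() decides whether a file missing after a merge was deleted by the merge itself or merely inherited a deletion from a parent. A hypothetical toy model of that decision, using plain dicts of {filename: filenode} in place of real manifests:

    def deletion_from_parent(f, m1, m2, ancestors):
        # True when the merge did NOT itself delete f: either nobody has
        # the file, or exactly one parent kept it unchanged relative to
        # every merge ancestor while the other parent deleted it.
        if f in m1:
            return f not in m2 and all(
                f in ma and ma[f] == m1[f] for ma in ancestors
            )
        elif f in m2:
            return all(f in ma and ma[f] == m2[f] for ma in ancestors)
        return True

    ma = {b'foo': b'node0'}   # merge ancestor
    m1 = {b'foo': b'node0'}   # p1 left foo untouched
    m2 = {}                   # p2 deleted foo
    print(deletion_from_parent(b'foo', m1, m2, [ma]))   # True: inherited
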
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(
        self,
        node1=b'.',
        node2=None,
        match=None,
        ignored=False,
        clean=False,
        unknown=False,
        listsubrepos=False,
    ):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(
            node2, match, ignored, clean, unknown, listsubrepos
        )

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

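A sketch of a post-status fixup hook (reusing `repo` from the first sketch): the callback runs under the wlock right after a dirstate status, and the list is cleared afterwards.

    def fixup(wctx, status):
        wctx.repo().ui.note(b'%d file(s) modified\n' % len(status.modified))

    repo.addpostdsstatus(fixup)
    repo[None].status()   # triggers the callback once
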
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

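A sketch of the head queries above (reusing `repo` from the first sketch); both return node ids ordered newest-first:

    from mercurial.node import short

    print([short(n) for n in repo.heads()])
    print([short(n) for n in repo.branchheads(b'default')])
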
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

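between() records nodes at exponentially growing first-parent distances (1, 2, 4, 8, ...) from each `top` until `bottom` is reached, roughly what the legacy discovery protocol sampled. A hypothetical standalone rendering of the same loop over a toy parent map:

    def sample_between(parent_of, top, bottom):
        # collect ancestors of `top` at distances 1, 2, 4, 8, ...
        n, out, i, f = top, [], 0, 1
        while n != bottom:
            n = parent_of[n]
            i += 1
            if i == f:
                out.append(n)
                f *= 2
        return out

    parents = {5: 4, 4: 3, 3: 2, 2: 1, 1: 0}
    print(sample_between(parents, 5, 0))   # [4, 3, 1]
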
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing members) before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook(b'prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_(b"(%s)\n") % exc.hint)
            return False
        self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)

        def runhook(unused_success):
            self.hook(
                b'pushkey',
                namespace=namespace,
                key=key,
                old=old,
                new=new,
                ret=ret,
            )

        self._afterlock(runhook)
        return ret

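    # Illustrative sketch (not part of the changeset): pushkey namespaces are
    # how bookmarks and phases travel over the wire. A direct call could look
    # like this (oldhex/newhex are hypothetical hex nodes; an empty old value
    # means the key does not exist yet):
    #
    #     ok = repo.pushkey(b'bookmarks', b'stable', oldhex, newhex)
    #
    # A failing `prepushkey` hook makes this return False instead of raising.
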
    def listkeys(self, namespace):
        self.hook(b'prelistkeys', throw=True, namespace=namespace)
        self.ui.debug(b'listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook(b'listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return b"%s %s %s %s %s" % (
            one,
            two,
            pycompat.bytestr(three),
            pycompat.bytestr(four),
            pycompat.bytestr(five),
        )

    def savecommitmessage(self, text):
        fp = self.vfs(b'last-message.txt', b'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1 :])


# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]

    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass

    return a


def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith(b'journal')
    return os.path.join(base, name.replace(b'journal', b'undo', 1))


def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)


def islocal(path):
    return True


def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if b'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')

    return createopts


def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if b'sharedrepo' in createopts:
        requirements = set(createopts[b'sharedrepo'].requirements)
        if createopts.get(b'sharedrelative'):
            requirements.add(b'relshared')
        else:
            requirements.add(b'shared')

        return requirements

    if b'backend' not in createopts:
        raise error.ProgrammingError(
            b'backend key not present in createopts; '
            b'was defaultcreateopts() called?'
        )

    if createopts[b'backend'] != b'revlogv1':
        raise error.Abort(
            _(
                b'unable to determine repository requirements for '
                b'storage backend: %s'
            )
            % createopts[b'backend']
        )

    requirements = {b'revlogv1'}
    if ui.configbool(b'format', b'usestore'):
        requirements.add(b'store')
        if ui.configbool(b'format', b'usefncache'):
            requirements.add(b'fncache')
            if ui.configbool(b'format', b'dotencode'):
                requirements.add(b'dotencode')

    compengines = ui.configlist(b'format', b'revlog-compression')
    for compengine in compengines:
        if compengine in util.compengines:
            break
    else:
        raise error.Abort(
            _(
                b'compression engines %s defined by '
                b'format.revlog-compression not available'
            )
            % b', '.join(b'"%s"' % e for e in compengines),
            hint=_(
                b'run "hg debuginstall" to list available '
                b'compression engines'
            ),
        )

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine == b'zstd':
        requirements.add(b'revlog-compression-zstd')
    elif compengine != b'zlib':
        requirements.add(b'exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add(b'generaldelta')
    if ui.configbool(b'format', b'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    # experimental config: format.exp-use-side-data
    if ui.configbool(b'format', b'exp-use-side-data'):
        requirements.add(SIDEDATA_REQUIREMENT)
    # experimental config: format.exp-use-copies-side-data-changeset
    if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
        requirements.add(SIDEDATA_REQUIREMENT)
        requirements.add(COPIESSDC_REQUIREMENT)
    if ui.configbool(b'experimental', b'treemanifest'):
        requirements.add(b'treemanifest')

    revlogv2 = ui.config(b'experimental', b'revlogv2')
    if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
        requirements.remove(b'revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard(b'generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool(b'format', b'internal-phase'):
        requirements.add(b'internal-phase')

    if createopts.get(b'narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get(b'lfs'):
        requirements.add(b'lfs')

    if ui.configbool(b'format', b'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    if ui.configbool(b'format', b'use-persistent-nodemap'):
        requirements.add(NODEMAP_REQUIREMENT)

    return requirements


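# Illustrative sketch (not part of the changeset): an extension that adds its
# own requirement would wrap this function; the requirement and config names
# below are hypothetical:
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, ui, createopts):
#         reqs = orig(ui, createopts)
#         if ui.configbool(b'myext', b'enabled'):
#             reqs.add(b'exp-myext')
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'newreporequirements', _newreporequirements
#         )
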
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        b'backend',
        b'lfs',
        b'narrowfiles',
        b'sharedrepo',
        b'sharedrelative',
        b'shareditems',
        b'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}


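# Illustrative sketch (not part of the changeset): an extension that handles a
# custom creation option would remove it from the unknown set the same way
# (b'myext-opt' is hypothetical):
#
#     def _filterknowncreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop(b'myext-opt', None)
#         return unknown
#
#     def extsetup(ui):
#         extensions.wrapfunction(
#             localrepo, 'filterknowncreateopts', _filterknowncreateopts
#         )
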
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError(
            b'filterknowncreateopts() did not return a dict'
        )

    if unknownopts:
        raise error.Abort(
            _(
                b'unable to create repository because of unknown '
                b'creation option: %s'
            )
            % b', '.join(sorted(unknownopts)),
            hint=_(b'is a required extension not loaded?'),
        )

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_(b'repository %s already exists') % path)

    if b'sharedrepo' in createopts:
        sharedpath = createopts[b'sharedrepo'].sharedpath

        if createopts.get(b'sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(
                    _(b'cannot calculate relative path'),
                    hint=stringutil.forcebytestr(e),
                )

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and b'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(
            b'00changelog.i',
            b'\0\0\0\2 dummy changelog to prevent using the old repo '
            b'layout',
        )

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if b'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get(b'shareditems'):
        shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)


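# Illustrative sketch (not part of the changeset): a caller holding a ui
# object could create an lfs-enabled repository like this (path hypothetical):
#
#     createrepository(ui, b'/srv/repos/project', createopts={b'lfs': True})
#
# Any createopts key left over by filterknowncreateopts() aborts the creation
# with a hint that a required extension may not be loaded.
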
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == 'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError(
                b'repo instances should not be used after unshare'
            )

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, '__class__', poisonedrepository)
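
# Illustrative sketch (not part of the changeset): after poisoning, only
# close() keeps working; every other attribute access raises:
#
#     poisonrepository(repo)
#     repo.close()      # tolerated, some destructors call it
#     repo.changelog    # raises error.ProgrammingError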
@@ -1,268 +1,324 @@
# metadata.py -- code related to various metadata computation and access.
#
# Copyright 2019 Google, Inc <martinvonz@google.com>
# Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import, print_function

import multiprocessing

from . import (
    error,
    node,
    pycompat,
    util,
)

from .revlogutils import (
    flagutil as sidedataflag,
    sidedata as sidedatamod,
)


def computechangesetfilesadded(ctx):
    """return the list of files added in a changeset"""
    added = []
    for f in ctx.files():
        if not any(f in p for p in ctx.parents()):
            added.append(f)
    return added


def get_removal_filter(ctx, x=None):
    """return a function to detect files "wrongly" detected as `removed`

    When a file is removed relative to p1 in a merge, this
    function determines whether the absence is due to a
    deletion from a parent, or whether the merge commit
    itself deletes the file. We decide this by doing a
    simplified three way merge of the manifest entry for
    the file. There are two ways we decide the merge
    itself didn't delete a file:
    - neither parent (nor the merge) contains the file
    - exactly one parent contains the file, and that
      parent has the same filelog entry as the merge
      ancestor (or all of them if there are two). In other
      words, that parent left the file unchanged while the
      other one deleted it.
    One way to think about this is that deleting a file is
    similar to emptying it, so the list of changed files
    should be similar either way. The computation
    described above is not done directly in _filecommit
    when creating the list of changed files, however
    it does something very similar by comparing filelog
    nodes.
    """

    if x is not None:
        p1, p2, m1, m2 = x
    else:
        p1 = ctx.p1()
        p2 = ctx.p2()
        m1 = p1.manifest()
        m2 = p2.manifest()

    @util.cachefunc
    def mas():
        p1n = p1.node()
        p2n = p2.node()
        cahs = ctx.repo().changelog.commonancestorsheads(p1n, p2n)
        if not cahs:
            cahs = [node.nullrev]
        return [ctx.repo()[r].manifest() for r in cahs]

    def deletionfromparent(f):
        if f in m1:
            return f not in m2 and all(
                f in ma and ma.find(f) == m1.find(f) for ma in mas()
            )
        elif f in m2:
            return all(f in ma and ma.find(f) == m2.find(f) for ma in mas())
        else:
            return True

    return deletionfromparent


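# Illustrative sketch (not part of the changeset): the three-way logic above
# can be modeled with plain dicts mapping filename -> filelog entry as toy
# manifests (toy_deletion_from_parent is hypothetical):
#
#     def toy_deletion_from_parent(f, m1, m2, ancestors):
#         if f in m1:
#             return f not in m2 and all(
#                 ma.get(f) == m1[f] for ma in ancestors
#             )
#         elif f in m2:
#             return all(ma.get(f) == m2[f] for ma in ancestors)
#         return True
#
#     ma = {b'a': 1}
#     # p1 kept b'a' unchanged and p2 deleted it: the deletion came from p2.
#     assert toy_deletion_from_parent(b'a', {b'a': 1}, {}, [ma])
#     # p1 modified b'a' and p2 deleted it: the merge itself decided.
#     assert not toy_deletion_from_parent(b'a', {b'a': 2}, {}, [ma])
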
def computechangesetfilesremoved(ctx):
    """return the list of files removed in a changeset"""
    removed = []
    for f in ctx.files():
        if f not in ctx:
            removed.append(f)
    return removed


def computechangesetcopies(ctx):
    """return the copies data for a changeset

    The copies data is returned as a pair of dictionaries (p1copies, p2copies).

    Each dictionary is of the form: `{newname: oldname}`
    """
    p1copies = {}
    p2copies = {}
    p1 = ctx.p1()
    p2 = ctx.p2()
    narrowmatch = ctx._repo.narrowmatch()
    for dst in ctx.files():
        if not narrowmatch(dst) or dst not in ctx:
            continue
        copied = ctx[dst].renamed()
        if not copied:
            continue
        src, srcnode = copied
        if src in p1 and p1[src].filenode() == srcnode:
            p1copies[dst] = src
        elif src in p2 and p2[src].filenode() == srcnode:
            p2copies[dst] = src
    return p1copies, p2copies


def encodecopies(files, copies):
    items = []
    for i, dst in enumerate(files):
        if dst in copies:
            items.append(b'%d\0%s' % (i, copies[dst]))
    if len(items) != len(copies):
        raise error.ProgrammingError(
            b'some copy targets missing from file list'
        )
    return b"\n".join(items)


def decodecopies(files, data):
    try:
        copies = {}
        if not data:
            return copies
        for l in data.split(b'\n'):
            strindex, src = l.split(b'\0')
            i = int(strindex)
            dst = files[i]
            copies[dst] = src
        return copies
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "p1copies") and
        # used different syntax for the value.
        return None


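# Illustrative sketch (not part of the changeset): encodecopies() and
# decodecopies() are pure functions and round-trip cleanly:
#
#     files = [b'new-a', b'new-b', b'untouched']
#     copies = {b'new-a': b'old-a', b'new-b': b'old-b'}
#     data = encodecopies(files, copies)  # b'0\x00old-a\n1\x00old-b'
#     assert decodecopies(files, data) == copies
#
# A copy target missing from `files` is a programming error on encode, while
# malformed data on decode yields None rather than raising.
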
def encodefileindices(files, subset):
    subset = set(subset)
    indices = []
    for i, f in enumerate(files):
        if f in subset:
            indices.append(b'%d' % i)
    return b'\n'.join(indices)


def decodefileindices(files, data):
    try:
        subset = []
        if not data:
            return subset
        for strindex in data.split(b'\n'):
            i = int(strindex)
            if i < 0 or i >= len(files):
                return None
            subset.append(files[i])
        return subset
    except (ValueError, IndexError):
        # Perhaps someone had chosen the same key name (e.g. "added") and
        # used different syntax for the value.
        return None


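# Illustrative sketch (not part of the changeset): the indices encoding simply
# stores newline-separated positions into the sorted file list:
#
#     files = [b'a', b'b', b'c']
#     data = encodefileindices(files, [b'a', b'c'])  # b'0\n2'
#     assert decodefileindices(files, data) == [b'a', b'c']
#     assert decodefileindices(files, b'5') is None  # index out of range
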
def _getsidedata(srcrepo, rev):
    ctx = srcrepo[rev]
    filescopies = computechangesetcopies(ctx)
    filesadded = computechangesetfilesadded(ctx)
    filesremoved = computechangesetfilesremoved(ctx)
    sidedata = {}
    if any([filescopies, filesadded, filesremoved]):
        sortedfiles = sorted(ctx.files())
        p1copies, p2copies = filescopies
        p1copies = encodecopies(sortedfiles, p1copies)
        p2copies = encodecopies(sortedfiles, p2copies)
        filesadded = encodefileindices(sortedfiles, filesadded)
        filesremoved = encodefileindices(sortedfiles, filesremoved)
        if p1copies:
            sidedata[sidedatamod.SD_P1COPIES] = p1copies
        if p2copies:
            sidedata[sidedatamod.SD_P2COPIES] = p2copies
        if filesadded:
            sidedata[sidedatamod.SD_FILESADDED] = filesadded
        if filesremoved:
            sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
    return sidedata


def getsidedataadder(srcrepo, destrepo):
    use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
    if pycompat.iswindows or not use_w:
        return _get_simple_sidedata_adder(srcrepo, destrepo)
    else:
        return _get_worker_sidedata_adder(srcrepo, destrepo)


def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
    """The function used by a worker precomputing sidedata

    It reads from an input queue containing revision numbers.
    It writes to an output queue containing (rev, <sidedata-map>) pairs.

    The `None` input value is used as a stop signal.

    The `tokens` semaphore is used to avoid having too many unprocessed
    entries. The workers need to acquire one token before fetching a task.
    They will be released by the consumer of the produced data.
    """
    tokens.acquire()
    rev = revs_queue.get()
    while rev is not None:
        data = _getsidedata(srcrepo, rev)
        sidedata_queue.put((rev, data))
        tokens.acquire()
        rev = revs_queue.get()
    # processing of `None` is completed, release the token.
    tokens.release()


BUFF_PER_WORKER = 50


def _get_worker_sidedata_adder(srcrepo, destrepo):
    """The parallel version of the sidedata computation

    This code spawns a pool of workers that precompute a buffer of sidedata
    before we actually need them"""
    # avoid circular import copies -> scmutil -> worker -> copies
    from . import worker

    nbworkers = worker._numworkers(srcrepo.ui)

    tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
    revsq = multiprocessing.Queue()
    sidedataq = multiprocessing.Queue()

    assert srcrepo.filtername is None
    # queue all tasks beforehand, revision numbers are small and it makes
    # synchronisation simpler
    #
    # Since the computation for each node can be quite expensive, the overhead
    # of using a single queue is not relevant. In practice, most computations
    # are fast but some are very expensive and dominate all the other smaller
    # costs.
    for r in srcrepo.changelog.revs():
        revsq.put(r)
    # queue the "no more tasks" markers
    for i in range(nbworkers):
        revsq.put(None)

    allworkers = []
    for i in range(nbworkers):
        args = (srcrepo, revsq, sidedataq, tokens)
        w = multiprocessing.Process(target=_sidedata_worker, args=args)
        allworkers.append(w)
        w.start()

    # dictionary to store results for revisions higher than the one we are
    # looking for. For example, if we need the sidedata map for 42 and 43 is
    # received, we shelve 43 for later use.
    staging = {}

    def sidedata_companion(revlog, rev):
        sidedata = {}
        if util.safehasattr(revlog, b'filteredrevs'):  # this is a changelog
            # Is the data previously shelved?
            sidedata = staging.pop(rev, None)
            if sidedata is None:
                # look at the queued results until we find the one we are
                # looking for (shelve the other ones)
                r, sidedata = sidedataq.get()
                while r != rev:
                    staging[r] = sidedata
                    r, sidedata = sidedataq.get()
            tokens.release()
        return False, (), sidedata

    return sidedata_companion


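# Illustrative sketch (not part of the changeset): the BoundedSemaphore above
# caps how many unconsumed results each worker may have in flight. The same
# backpressure pattern, reduced to a single thread for brevity:
#
#     import queue
#     import threading
#
#     tokens = threading.BoundedSemaphore(2)  # at most 2 pending results
#     results = queue.Queue()
#
#     def producer():
#         for item in range(10):
#             tokens.acquire()  # blocks once 2 results are pending
#             results.put(item * item)
#
#     threading.Thread(target=producer).start()
#     for _ in range(10):
#         print(results.get())
#         tokens.release()  # the consumer frees a slot
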
def _get_simple_sidedata_adder(srcrepo, destrepo):
    """The simple version of the sidedata computation

    It just computes it in the same thread on request"""

    def sidedatacompanion(revlog, rev):
        sidedata = {}
        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
            sidedata = _getsidedata(srcrepo, rev)
        return False, (), sidedata

    return sidedatacompanion


def getsidedataremover(srcrepo, destrepo):
    def sidedatacompanion(revlog, rev):
        f = ()
        if util.safehasattr(revlog, 'filteredrevs'):  # this is a changelog
            if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
                f = (
                    sidedatamod.SD_P1COPIES,
                    sidedatamod.SD_P2COPIES,
                    sidedatamod.SD_FILESADDED,
                    sidedatamod.SD_FILESREMOVED,
                )
        return False, f, {}

    return sidedatacompanion