localrepo: unconditionally enable general delta with sparse revlogs...
Boris Feld
r38783:17da52bb stable
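In short: `_applyopenerreqs()` now forces the `generaldelta` opener option on
whenever the repository carries the `sparserevlog` requirement, rather than
leaving it solely to the user's delta configuration. A minimal sketch of the
resulting behavior (the `requirements` and `options` values below are
illustrative, not the full opener state):

    # illustrative sketch, not code from the patch itself
    requirements = {'revlogv1', 'sparserevlog'}
    options = dict((r, 1) for r in requirements
                   if r in {'revlogv1', 'generaldelta', 'treemanifest'})
    options['sparse-revlog'] = 'sparserevlog' in requirements
    if options['sparse-revlog']:
        options['generaldelta'] = True  # forced on, regardless of config
    assert options['generaldelta'] is True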
@@ -1,2395 +1,2397 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
83 """All filecache usage on repo are done for logic that should be unfiltered
83 """All filecache usage on repo are done for logic that should be unfiltered
84 """
84 """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

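# A typical use of the decorators above (illustrative; real call sites appear
# further down in this file, e.g. localrepository._bookmarks):
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# The cached value is recomputed only when one of the named files changes
# on disk.
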
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False, intents=None):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
            'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
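        # Sparse revlogs only work on top of general delta (deltas stored
        # against arbitrary bases), so the requirement forces the option on
        # regardless of the user's general-delta configuration.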
        if sparserevlog:
            self.svfs.options['generaldelta'] = True

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
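
    # Illustrative note: repo.filtered('visible') returns a view with hidden
    # changesets excluded, while 'served' (used by localpeer above) also
    # hides secret changesets.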

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
862 """
864 """
863 try:
865 try:
864 self[changeid]
866 self[changeid]
865 return True
867 return True
866 except error.RepoLookupError:
868 except error.RepoLookupError:
867 return False
869 return False
868
870
869 def __nonzero__(self):
871 def __nonzero__(self):
870 return True
872 return True
871
873
872 __bool__ = __nonzero__
874 __bool__ = __nonzero__
873
875
874 def __len__(self):
876 def __len__(self):
875 # no need to pay the cost of repoview.changelog
877 # no need to pay the cost of repoview.changelog
876 unfi = self.unfiltered()
878 unfi = self.unfiltered()
877 return len(unfi.changelog)
879 return len(unfi.changelog)
878
880
879 def __iter__(self):
881 def __iter__(self):
880 return iter(self.changelog)
882 return iter(self.changelog)
881
883
882 def revs(self, expr, *args):
884 def revs(self, expr, *args):
883 '''Find revisions matching a revset.
885 '''Find revisions matching a revset.
884
886
885 The revset is specified as a string ``expr`` that may contain
887 The revset is specified as a string ``expr`` that may contain
886 %-formatting to escape certain types. See ``revsetlang.formatspec``.
888 %-formatting to escape certain types. See ``revsetlang.formatspec``.
887
889
888 Revset aliases from the configuration are not expanded. To expand
890 Revset aliases from the configuration are not expanded. To expand
889 user aliases, consider calling ``scmutil.revrange()`` or
891 user aliases, consider calling ``scmutil.revrange()`` or
890 ``repo.anyrevs([expr], user=True)``.
892 ``repo.anyrevs([expr], user=True)``.
891
893
892 Returns a revset.abstractsmartset, which is a list-like interface
894 Returns a revset.abstractsmartset, which is a list-like interface
893 that contains integer revisions.
895 that contains integer revisions.
894 '''
896 '''
895 expr = revsetlang.formatspec(expr, *args)
897 expr = revsetlang.formatspec(expr, *args)
896 m = revset.match(None, expr)
898 m = revset.match(None, expr)
897 return m(self)
899 return m(self)
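
    # Illustrative usage sketch (not part of the original source): the
    # %-formatting keeps caller-supplied values out of the revset grammar.
    # `node` is assumed to be a binary changeset id.
    #
    #   revs = repo.revs('heads(%d::)', 0)        # %d escapes an int rev
    #   revs = repo.revs('ancestors(%n)', node)   # %n escapes a binary node
    #   if revs:
    #       tiprev = max(revs)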

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
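
    # Illustrative usage sketch (not part of the original source;
    # hypothetical file name): because set() is a generator, iteration can
    # stop early without turning every matching revision into a context.
    #
    #   for ctx in repo.set('modifies(%s)', 'mercurial/localrepo.py'):
    #       if ctx.branch() == 'stable':
    #           break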

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
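
    # Illustrative usage sketch (not part of the original source;
    # `release` is a hypothetical user alias): several specs are matched at
    # once, and localalias pins a definition regardless of the user's
    # configuration.
    #
    #   revs = repo.anyrevs(['release() and public()'], user=True,
    #                       localalias={'release': 'tag("re:^v")'})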

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
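
    # Illustrative usage sketch (not part of the original source;
    # hypothetical tag name): distinguishing tag kinds before acting.
    #
    #   kind = repo.tagtype('v1.0')
    #   if kind == 'global':    # recorded in .hgtags
    #       ...
    #   elif kind == 'local':   # recorded in .hg/localtags
    #       ...
    #   else:                   # None: no such tag
    #       ...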

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
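
    # Illustrative usage sketch (not part of the original source): with
    # ignoremissing=True a missing branch yields None instead of raising
    # RepoLookupError.
    #
    #   node = repo.branchtip('stable', ignoremissing=True)
    #   if node is None:
    #       ...  # the branch does not exist in this repository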

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
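
    # Illustrative configuration sketch (not part of the original source):
    # _loadfilter/_filter are driven by the [encode]/[decode] hgrc sections,
    # where each entry maps a file pattern to either a shell command or a
    # data filter registered via adddatafilter(). For example:
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #   [decode]
    #   **.txt = tempfile: dos2unix -n INFILE OUTFILE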

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
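
    # Illustrative usage sketch (not part of the original source;
    # hypothetical file names): the flags string picks the on-disk form,
    # 'l' writing a symlink and 'x' setting the executable bit.
    #
    #   repo.wwrite('bin/run', '#!/bin/sh\necho hi\n', 'x')  # executable
    #   repo.wwrite('link', 'target', 'l')                   # symlink
    #   repo.wwrite('README', 'plain text\n', '')            # regular file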

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new or changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) isn't
                # invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be
        # warm when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
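
    # Illustrative usage sketch (not part of the original source): callers
    # open transactions under the store lock, close on success, and always
    # release so abort hooks and journal cleanup run.
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...           # mutate the store
    #           tr.close()    # commit
    #       finally:
    #           tr.release()  # aborts if close() was never reached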

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1571
1573
1572 def _buildcacheupdater(self, newtransaction):
1574 def _buildcacheupdater(self, newtransaction):
1573 """called during transaction to build the callback updating cache
1575 """called during transaction to build the callback updating cache
1574
1576
1575 Lives on the repository to help extension who might want to augment
1577 Lives on the repository to help extension who might want to augment
1576 this logic. For this purpose, the created transaction is passed to the
1578 this logic. For this purpose, the created transaction is passed to the
1577 method.
1579 method.
1578 """
1580 """
1579 # we must avoid cyclic reference between repo and transaction.
1581 # we must avoid cyclic reference between repo and transaction.
1580 reporef = weakref.ref(self)
1582 reporef = weakref.ref(self)
1581 def updater(tr):
1583 def updater(tr):
1582 repo = reporef()
1584 repo = reporef()
1583 repo.updatecaches(tr)
1585 repo.updatecaches(tr)
1584 return updater
1586 return updater
1585
1587
1586 @unfilteredmethod
1588 @unfilteredmethod
1587 def updatecaches(self, tr=None, full=False):
1589 def updatecaches(self, tr=None, full=False):
1588 """warm appropriate caches
1590 """warm appropriate caches
1589
1591
1590 If this function is called after a transaction closed. The transaction
1592 If this function is called after a transaction closed. The transaction
1591 will be available in the 'tr' argument. This can be used to selectively
1593 will be available in the 'tr' argument. This can be used to selectively
1592 update caches relevant to the changes in that transaction.
1594 update caches relevant to the changes in that transaction.
1593
1595
1594 If 'full' is set, make sure all caches the function knows about have
1596 If 'full' is set, make sure all caches the function knows about have
1595 up-to-date data. Even the ones usually loaded more lazily.
1597 up-to-date data. Even the ones usually loaded more lazily.
1596 """
1598 """
1597 if tr is not None and tr.hookargs.get('source') == 'strip':
1599 if tr is not None and tr.hookargs.get('source') == 'strip':
1598 # During strip, many caches are invalid but
1600 # During strip, many caches are invalid but
1599 # later call to `destroyed` will refresh them.
1601 # later call to `destroyed` will refresh them.
1600 return
1602 return
1601
1603
1602 if tr is None or tr.changes['revs']:
1604 if tr is None or tr.changes['revs']:
1603 # updating the unfiltered branchmap should refresh all the others,
1605 # updating the unfiltered branchmap should refresh all the others,
1604 self.ui.debug('updating the branch cache\n')
1606 self.ui.debug('updating the branch cache\n')
1605 branchmap.updatecache(self.filtered('served'))
1607 branchmap.updatecache(self.filtered('served'))
1606
1608
1607 if full:
1609 if full:
1608 rbc = self.revbranchcache()
1610 rbc = self.revbranchcache()
1609 for r in self.changelog:
1611 for r in self.changelog:
1610 rbc.branchinfo(r)
1612 rbc.branchinfo(r)
1611 rbc.write()
1613 rbc.write()
1612
1614
1613 def invalidatecaches(self):
1615 def invalidatecaches(self):
1614
1616
1615 if '_tagscache' in vars(self):
1617 if '_tagscache' in vars(self):
1616 # can't use delattr on proxy
1618 # can't use delattr on proxy
1617 del self.__dict__['_tagscache']
1619 del self.__dict__['_tagscache']
1618
1620
1619 self.unfiltered()._branchcaches.clear()
1621 self.unfiltered()._branchcaches.clear()
1620 self.invalidatevolatilesets()
1622 self.invalidatevolatilesets()
1621 self._sparsesignaturecache.clear()
1623 self._sparsesignaturecache.clear()
1622
1624
1623 def invalidatevolatilesets(self):
1625 def invalidatevolatilesets(self):
1624 self.filteredrevcache.clear()
1626 self.filteredrevcache.clear()
1625 obsolete.clearobscaches(self)
1627 obsolete.clearobscaches(self)
1626
1628
1627 def invalidatedirstate(self):
1629 def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restore it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes an unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

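    # A minimal usage sketch (hypothetical caller, not part of this module):
    # after an outside process has modified the store, drop the cached
    # objects so the next attribute access recomputes them.
    #
    #     repo.invalidate(clearfilecache=True)  # drop every filecache entry
    #     repo.changelog                        # reloaded on this access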
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

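    # Sketch of intended use (assumed caller code): defer a notification
    # until every lock is released, falling back to an immediate call when
    # no lock is held, exactly as the for/else above implements.
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)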
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

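    # Sketch of the documented lock ordering (hypothetical caller): always
    # take 'wlock' before 'lock' and release in reverse order, mirroring
    # the pattern commit() uses below.
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         # ... modify store and working copy metadata ...
    #     finally:
    #         lockmod.release(lock, wlock)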
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

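    # Illustration (made-up values): for the rename case above, the new
    # filelog revision of 'bar' is stored with a null first parent plus copy
    # metadata pointing back at the source revision of 'foo':
    #
    #     meta = {"copy": "foo", "copyrev": hex(crev)}
    #     fparent1, fparent2 = nullid, newfparent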
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

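    # Hypothetical caller sketch: commit every modified file with a fixed
    # message and user. commit() returns None when there is nothing to
    # commit and ui.allowemptycommit is not set.
    #
    #     node = repo.commit(text='fix encoding bug',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')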
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

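    # Sketch (assumed extension code): register a one-shot callback that
    # runs under the wlock once status fixups complete. As the docstring
    # notes, it must be re-added before every dirstate.status call.
    #
    #     def afterstatus(wctx, status):
    #         wctx.repo().ui.note('%d clean files\n' % len(status.clean))
    #     repo.addpostdsstatus(afterstatus)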
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

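    # Worked example for between(): starting from 'top', a node is recorded
    # whenever i == f, and f doubles after each hit, so the returned list
    # holds the ancestors 1, 2, 4, 8, ... steps away from top. This
    # exponential spacing keeps discovery probes logarithmic in the length
    # of history.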
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

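    # Hypothetical usage sketch: bookmarks are exposed through pushkey, so a
    # bookmark move can be requested with hex node values ('old' must match
    # the current value for the update to be accepted):
    #
    #     ok = repo.pushkey('bookmarks', 'featureX', oldhex, newhex)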
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

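# Sketch of intended use (names are illustrative): the closure returned by
# aftertrans is handed to the transaction machinery so that, after a
# successful close, the journal is renamed to its undo counterpart:
#
#     onclose = aftertrans([(repo.svfs, 'journal', 'undo')])
#     onclose()  # renames journal -> undo, tolerating a missing source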
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None):
    return localrepository(ui, util.urllocalpath(path), create,
                           intents=intents)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
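# Example outcome (assuming the stock config defaults of this era, where
# usestore, usefncache, dotencode and generaldelta are all enabled): a plain
# 'hg init' would yield approximately:
#
#     {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}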