repo: remove the last few "pass" statements in localrepo.__getitem__...
Martin von Zweigbergk
r40099:f84d7ed3 default
@@ -1,3026 +1,3020 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecached property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

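# A minimal sketch (editor's addition, not part of the original change)
# of how the filecache classes above are consumed. The host class here
# is hypothetical; the real consumers are properties on localrepository
# further down in this file, e.g. ``@repofilecache('bookmarks')``.
def _repofilecacheexample():
    class examplerepo(object):
        def __init__(self, vfs):
            self.vfs = vfs
            # scmutil.filecache keeps its per-object bookkeeping here.
            self._filecache = {}

        def unfiltered(self):
            return self

        @repofilecache('bookmarks')
        def _bookmarks(self):
            # Recomputed only when the stat info of .hg/bookmarks
            # changes; _basefilecache.__get__ caches the value on the
            # unfiltered repo.
            return self.vfs.tryread('bookmarks')

    return examplerepo
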
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

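# A minimal sketch (editor's addition, not part of the original change)
# of the executor protocol implemented above, mirroring how core code
# drives peers. ``peer`` is assumed to be any object with a
# ``commandexecutor()`` method, such as localpeer below.
def _commandexecutorexample(peer):
    with peer.commandexecutor() as e:
        # For the local executor the returned future is already
        # resolved; wire peers may defer work until the command is
        # actually sent.
        f = e.callcommand('heads', {})
        return f.result()
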
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

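# A minimal sketch (editor's addition, not part of the original change)
# of how an extension registers one of these functions. The requirement
# name is hypothetical; the in-core lfs extension follows this shape.
def _featuresetupexample():
    def featuresetup(ui, supported):
        # Declare that repos carrying this extra requirement can be
        # opened while this extension is loaded.
        supported.add(b'exp-myfeature')

    featuresetupfuncs.add(featuresetup)
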
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

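# A minimal sketch (editor's addition, not part of the original change)
# of the type() naming trick used by makelocalrepository() above.
def _derivedtypeexample():
    # This name could never appear in a class statement, but type()
    # accepts it, which makes repr() of a repo self-describing.
    cls = type(r'derivedrepo:/path/to/repo<store,fncache>', (object,), {})
    return cls.__name__
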
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extensions to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

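# A sketch (editor's addition, not part of the original change) of the
# decision table implemented by makestore(), using a hypothetical path:
#   {'store', 'fncache', 'dotencode'} -> fncachestore, dotencode enabled
#   {'store'}                         -> encodedstore
#   set()                             -> basicstore (very old repos)
def _makestoreexample():
    vfstype = lambda base: vfsmod.vfs(base, cacheaudited=True)
    return makestore({b'store', b'fncache'}, b'/path/to/repo/.hg', vfstype)
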
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements,
                                                    features))

    return options

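# A sketch (editor's addition, not part of the original change): with no
# revlog requirement at all, the repo is assumed to be revlog version 0
# and no revlog-specific options are resolved. Assumes a default ui is
# acceptable for the config lookups involved.
def _storevfsoptionsexample():
    from . import ui as uimod
    opts = resolvestorevfsoptions(uimod.ui(), set(), set())
    assert b'revlogv1' not in opts and b'revlogv2' not in opts
    return opts
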
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

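# A minimal sketch (editor's addition, not part of the original change)
# of an extension customizing type creation by wrapping a factory
# function named above; from within an extension this module would be
# passed as ``localrepo``. The wrapper body is hypothetical.
def _wrapfilestorageexample():
    def wrapfilestorage(orig, requirements, features, **kwargs):
        cls = orig(requirements, features, **kwargs)
        # An extension could substitute or subclass the returned type.
        return cls

    extensions.wrapfunction(
        sys.modules[__name__], 'makefilestorage', wrapfilestorage)
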
815 @interfaceutil.implementer(repository.ilocalrepositorymain)
815 @interfaceutil.implementer(repository.ilocalrepositorymain)
816 class localrepository(object):
816 class localrepository(object):
817 """Main class for representing local repositories.
817 """Main class for representing local repositories.
818
818
819 All local repositories are instances of this class.
819 All local repositories are instances of this class.
820
820
821 Constructed on its own, instances of this class are not usable as
821 Constructed on its own, instances of this class are not usable as
822 repository objects. To obtain a usable repository object, call
822 repository objects. To obtain a usable repository object, call
823 ``hg.repository()``, ``localrepo.instance()``, or
823 ``hg.repository()``, ``localrepo.instance()``, or
824 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
824 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
825 ``instance()`` adds support for creating new repositories.
825 ``instance()`` adds support for creating new repositories.
826 ``hg.repository()`` adds more extension integration, including calling
826 ``hg.repository()`` adds more extension integration, including calling
827 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
827 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
828 used.
828 used.
829 """
829 """
830
830

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bits and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
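
    # To exercise the vfs/svfs wards during development, enable the config
    # knobs checked in __init__ (illustrative hgrc snippet):
    #
    #   [devel]
    #   all-warnings = true
    #   check-locks = true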

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
                return context.changectx(self, rev, node)
            elif changeid == 'null':
                node = nullid
                rev = nullrev
                return context.changectx(self, rev, node)
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
                return context.changectx(self, rev, node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
                return context.changectx(self, rev, node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
                return context.changectx(self, rev, node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(_("unknown revision '%s'") % changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
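
    # Illustrative lookups (sketch): ``repo[None]`` yields a workingctx;
    # integers, 'null', 'tip', '.', 20-byte binary nodes and 40-character
    # hex strings yield changectx objects:
    #
    #   ctx = repo['tip']
    #   assert repo[ctx.node()].rev() == ctx.rev()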

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False
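
    # Example (sketch): membership tests reuse the ``__getitem__`` lookup
    # rules above:
    #
    #   if node in repo:
    #       ctx = repo[node]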

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
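
    # Example (sketch): %-formatting is escaped via ``revsetlang.formatspec``,
    # so values can be passed directly:
    #
    #   for rev in repo.revs('heads(%d::)', 0):
    #       node = repo.changelog.node(rev)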

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
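
    # Example (sketch, alias name hypothetical): expanding user aliases while
    # overriding one locally:
    #
    #   revs = repo.anyrevs(['release'], user=True,
    #                       localalias={'release': 'tag()'})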

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
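
    # Example (sketch): the returned object behaves like a dictionary mapping
    # branch names to lists of head nodes:
    #
    #   heads = repo.branchmap().get('default', [])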

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
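
    # The filter configuration read above lives in hgrc sections named after
    # the filter; a sketch, with a hypothetical command:
    #
    #   [encode]
    #   *.txt = mycleanupfilter
    #
    # A command value of '!' disables filtering for that pattern.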

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
1688 # Code to track tag movement
1682 # Code to track tag movement
1689 #
1683 #
1690 # Since tags are all handled as file content, it is actually quite hard
1684 # Since tags are all handled as file content, it is actually quite hard
1691 # to track these movement from a code perspective. So we fallback to a
1685 # to track these movement from a code perspective. So we fallback to a
1692 # tracking at the repository level. One could envision to track changes
1686 # tracking at the repository level. One could envision to track changes
1693 # to the '.hgtags' file through changegroup apply but that fails to
1687 # to the '.hgtags' file through changegroup apply but that fails to
1694 # cope with case where transaction expose new heads without changegroup
1688 # cope with case where transaction expose new heads without changegroup
1695 # being involved (eg: phase movement).
1689 # being involved (eg: phase movement).
1696 #
1690 #
1697 # For now, we gate the feature behind a flag since this likely comes
1691 # For now, we gate the feature behind a flag since this likely comes
1698 # with performance impacts. The current code runs more often than needed
1692 # with performance impacts. The current code runs more often than needed
1699 # and does not use caches as much as it could. The current focus is on
1693 # and does not use caches as much as it could. The current focus is on
1700 # the behavior of the feature, so we disable it by default. The flag
1694 # the behavior of the feature, so we disable it by default. The flag
1701 # will be removed when we are happy with the performance impact.
1695 # will be removed when we are happy with the performance impact.
1702 #
1696 #
1703 # Once this feature is no longer experimental move the following
1697 # Once this feature is no longer experimental move the following
1704 # documentation to the appropriate help section:
1698 # documentation to the appropriate help section:
1705 #
1699 #
1706 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1700 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1707 # tags (new or changed or deleted tags). In addition the details of
1701 # tags (new or changed or deleted tags). In addition the details of
1708 # these changes are made available in a file at:
1702 # these changes are made available in a file at:
1709 # ``REPOROOT/.hg/changes/tags.changes``.
1703 # ``REPOROOT/.hg/changes/tags.changes``.
1710 # Make sure you check for HG_TAG_MOVED before reading that file as it
1704 # Make sure you check for HG_TAG_MOVED before reading that file as it
1711 # might exist from a previous transaction even if no tags were touched
1705 # might exist from a previous transaction even if no tags were touched
1712 # in this one. Changes are recorded in a line-based format::
1706 # in this one. Changes are recorded in a line-based format::
1713 #
1707 #
1714 # <action> <hex-node> <tag-name>\n
1708 # <action> <hex-node> <tag-name>\n
1715 #
1709 #
1716 # Actions are defined as follows:
1710 # Actions are defined as follows:
1717 # "-R": tag is removed,
1711 # "-R": tag is removed,
1718 # "+A": tag is added,
1712 # "+A": tag is added,
1719 # "-M": tag is moved (old value),
1713 # "-M": tag is moved (old value),
1720 # "+M": tag is moved (new value),
1714 # "+M": tag is moved (new value),
1721 tracktags = lambda x: None
1715 tracktags = lambda x: None
1722 # experimental config: experimental.hook-track-tags
1716 # experimental config: experimental.hook-track-tags
1723 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1717 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1724 if desc != 'strip' and shouldtracktags:
1718 if desc != 'strip' and shouldtracktags:
1725 oldheads = self.changelog.headrevs()
1719 oldheads = self.changelog.headrevs()
1726 def tracktags(tr2):
1720 def tracktags(tr2):
1727 repo = reporef()
1721 repo = reporef()
1728 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1722 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1729 newheads = repo.changelog.headrevs()
1723 newheads = repo.changelog.headrevs()
1730 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1724 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1731 # note: we compare lists here;
1725 # note: we compare lists here;
1732 # as we do it only once, building sets would not be cheaper
1726 # as we do it only once, building sets would not be cheaper
1733 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1727 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1734 if changes:
1728 if changes:
1735 tr2.hookargs['tag_moved'] = '1'
1729 tr2.hookargs['tag_moved'] = '1'
1736 with repo.vfs('changes/tags.changes', 'w',
1730 with repo.vfs('changes/tags.changes', 'w',
1737 atomictemp=True) as changesfile:
1731 atomictemp=True) as changesfile:
1738 # note: we do not register the file with the transaction
1732 # note: we do not register the file with the transaction
1739 # because we need it to still exist when the transaction
1733 # because we need it to still exist when the transaction
1740 # is closed (for txnclose hooks)
1734 # is closed (for txnclose hooks)
1741 tagsmod.writediff(changesfile, changes)
1735 tagsmod.writediff(changesfile, changes)
1742 def validate(tr2):
1736 def validate(tr2):
1743 """will run pre-closing hooks"""
1737 """will run pre-closing hooks"""
1744 # XXX the transaction API is a bit lacking here so we take a hacky
1738 # XXX the transaction API is a bit lacking here so we take a hacky
1745 # path for now
1739 # path for now
1746 #
1740 #
1747 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1741 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1748 # dict is copied before these run. In addition we needs the data
1742 # dict is copied before these run. In addition we needs the data
1749 # available to in memory hooks too.
1743 # available to in memory hooks too.
1750 #
1744 #
1751 # Moreover, we also need to make sure this runs before txnclose
1745 # Moreover, we also need to make sure this runs before txnclose
1752 # hooks and there is no "pending" mechanism that would execute
1746 # hooks and there is no "pending" mechanism that would execute
1753 # logic only if hooks are about to run.
1747 # logic only if hooks are about to run.
1754 #
1748 #
1755 # Fixing this limitation of the transaction is also needed to track
1749 # Fixing this limitation of the transaction is also needed to track
1756 # other families of changes (bookmarks, phases, obsolescence).
1750 # other families of changes (bookmarks, phases, obsolescence).
1757 #
1751 #
1758 # This will have to be fixed before we remove the experimental
1752 # This will have to be fixed before we remove the experimental
1759 # gating.
1753 # gating.
1760 tracktags(tr2)
1754 tracktags(tr2)
1761 repo = reporef()
1755 repo = reporef()
1762 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1756 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1763 scmutil.enforcesinglehead(repo, tr2, desc)
1757 scmutil.enforcesinglehead(repo, tr2, desc)
1764 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1758 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1765 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1759 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1766 args = tr.hookargs.copy()
1760 args = tr.hookargs.copy()
1767 args.update(bookmarks.preparehookargs(name, old, new))
1761 args.update(bookmarks.preparehookargs(name, old, new))
1768 repo.hook('pretxnclose-bookmark', throw=True,
1762 repo.hook('pretxnclose-bookmark', throw=True,
1769 txnname=desc,
1763 txnname=desc,
1770 **pycompat.strkwargs(args))
1764 **pycompat.strkwargs(args))
1771 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1765 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1772 cl = repo.unfiltered().changelog
1766 cl = repo.unfiltered().changelog
1773 for rev, (old, new) in tr.changes['phases'].items():
1767 for rev, (old, new) in tr.changes['phases'].items():
1774 args = tr.hookargs.copy()
1768 args = tr.hookargs.copy()
1775 node = hex(cl.node(rev))
1769 node = hex(cl.node(rev))
1776 args.update(phases.preparehookargs(node, old, new))
1770 args.update(phases.preparehookargs(node, old, new))
1777 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1771 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1778 **pycompat.strkwargs(args))
1772 **pycompat.strkwargs(args))
1779
1773
1780 repo.hook('pretxnclose', throw=True,
1774 repo.hook('pretxnclose', throw=True,
1781 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1775 txnname=desc, **pycompat.strkwargs(tr.hookargs))
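The pretxnclose-bookmark hook fired above receives the bookmark name plus its old and new nodes and can veto the transaction. A sketch of an in-process hook that refuses bookmark deletion; the function name is illustrative, and the keyword names ('bookmark', 'node', 'oldnode') assume those produced by bookmarks.preparehookargs():

    def rejectbookmarkdeletion(ui, repo, hooktype, **kwargs):
        # an empty new node means the bookmark is being deleted
        if not kwargs.get('node'):
            ui.warn(b'refusing to delete bookmark %s\n' % kwargs['bookmark'])
            return True  # a truthy return value fails a pretxnclose-* hook
        return False

    # enabled from an hgrc, e.g.:
    #   [hooks]
    #   pretxnclose-bookmark.nodelete = python:myhooks.rejectbookmarkdeletion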
1782 def releasefn(tr, success):
1776 def releasefn(tr, success):
1783 repo = reporef()
1777 repo = reporef()
1784 if success:
1778 if success:
1785 # this should be explicitly invoked here, because
1779 # this should be explicitly invoked here, because
1786 # in-memory changes aren't written out at closing
1780 # in-memory changes aren't written out at closing
1787 # transaction, if tr.addfilegenerator (via
1781 # transaction, if tr.addfilegenerator (via
1788 # dirstate.write or so) isn't invoked while
1782 # dirstate.write or so) isn't invoked while
1789 # transaction running
1783 # transaction running
1790 repo.dirstate.write(None)
1784 repo.dirstate.write(None)
1791 else:
1785 else:
1792 # discard all changes (including ones already written
1786 # discard all changes (including ones already written
1793 # out) in this transaction
1787 # out) in this transaction
1794 narrowspec.restorebackup(self, 'journal.narrowspec')
1788 narrowspec.restorebackup(self, 'journal.narrowspec')
1795 repo.dirstate.restorebackup(None, 'journal.dirstate')
1789 repo.dirstate.restorebackup(None, 'journal.dirstate')
1796
1790
1797 repo.invalidate(clearfilecache=True)
1791 repo.invalidate(clearfilecache=True)
1798
1792
1799 tr = transaction.transaction(rp, self.svfs, vfsmap,
1793 tr = transaction.transaction(rp, self.svfs, vfsmap,
1800 "journal",
1794 "journal",
1801 "undo",
1795 "undo",
1802 aftertrans(renames),
1796 aftertrans(renames),
1803 self.store.createmode,
1797 self.store.createmode,
1804 validator=validate,
1798 validator=validate,
1805 releasefn=releasefn,
1799 releasefn=releasefn,
1806 checkambigfiles=_cachedfiles,
1800 checkambigfiles=_cachedfiles,
1807 name=desc)
1801 name=desc)
1808 tr.changes['origrepolen'] = len(self)
1802 tr.changes['origrepolen'] = len(self)
1809 tr.changes['obsmarkers'] = set()
1803 tr.changes['obsmarkers'] = set()
1810 tr.changes['phases'] = {}
1804 tr.changes['phases'] = {}
1811 tr.changes['bookmarks'] = {}
1805 tr.changes['bookmarks'] = {}
1812
1806
1813 tr.hookargs['txnid'] = txnid
1807 tr.hookargs['txnid'] = txnid
1814 # note: writing the fncache only during finalize means that the file is
1808 # note: writing the fncache only during finalize means that the file is
1815 # outdated when running hooks. As fncache is used for streaming clone,
1809 # outdated when running hooks. As fncache is used for streaming clone,
1816 # this is not expected to break anything that happens during the hooks.
1810 # this is not expected to break anything that happens during the hooks.
1817 tr.addfinalize('flush-fncache', self.store.write)
1811 tr.addfinalize('flush-fncache', self.store.write)
1818 def txnclosehook(tr2):
1812 def txnclosehook(tr2):
1819 """To be run if transaction is successful, will schedule a hook run
1813 """To be run if transaction is successful, will schedule a hook run
1820 """
1814 """
1821 # Don't reference tr2 in hook() so we don't hold a reference.
1815 # Don't reference tr2 in hook() so we don't hold a reference.
1822 # This reduces memory consumption when there are multiple
1816 # This reduces memory consumption when there are multiple
1823 # transactions per lock. This can likely go away if issue5045
1817 # transactions per lock. This can likely go away if issue5045
1824 # fixes the function accumulation.
1818 # fixes the function accumulation.
1825 hookargs = tr2.hookargs
1819 hookargs = tr2.hookargs
1826
1820
1827 def hookfunc():
1821 def hookfunc():
1828 repo = reporef()
1822 repo = reporef()
1829 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1823 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1830 bmchanges = sorted(tr.changes['bookmarks'].items())
1824 bmchanges = sorted(tr.changes['bookmarks'].items())
1831 for name, (old, new) in bmchanges:
1825 for name, (old, new) in bmchanges:
1832 args = tr.hookargs.copy()
1826 args = tr.hookargs.copy()
1833 args.update(bookmarks.preparehookargs(name, old, new))
1827 args.update(bookmarks.preparehookargs(name, old, new))
1834 repo.hook('txnclose-bookmark', throw=False,
1828 repo.hook('txnclose-bookmark', throw=False,
1835 txnname=desc, **pycompat.strkwargs(args))
1829 txnname=desc, **pycompat.strkwargs(args))
1836
1830
1837 if hook.hashook(repo.ui, 'txnclose-phase'):
1831 if hook.hashook(repo.ui, 'txnclose-phase'):
1838 cl = repo.unfiltered().changelog
1832 cl = repo.unfiltered().changelog
1839 phasemv = sorted(tr.changes['phases'].items())
1833 phasemv = sorted(tr.changes['phases'].items())
1840 for rev, (old, new) in phasemv:
1834 for rev, (old, new) in phasemv:
1841 args = tr.hookargs.copy()
1835 args = tr.hookargs.copy()
1842 node = hex(cl.node(rev))
1836 node = hex(cl.node(rev))
1843 args.update(phases.preparehookargs(node, old, new))
1837 args.update(phases.preparehookargs(node, old, new))
1844 repo.hook('txnclose-phase', throw=False, txnname=desc,
1838 repo.hook('txnclose-phase', throw=False, txnname=desc,
1845 **pycompat.strkwargs(args))
1839 **pycompat.strkwargs(args))
1846
1840
1847 repo.hook('txnclose', throw=False, txnname=desc,
1841 repo.hook('txnclose', throw=False, txnname=desc,
1848 **pycompat.strkwargs(hookargs))
1842 **pycompat.strkwargs(hookargs))
1849 reporef()._afterlock(hookfunc)
1843 reporef()._afterlock(hookfunc)
1850 tr.addfinalize('txnclose-hook', txnclosehook)
1844 tr.addfinalize('txnclose-hook', txnclosehook)
1851 # Include a leading "-" to make it happen before the transaction summary
1845 # Include a leading "-" to make it happen before the transaction summary
1852 # reports registered via scmutil.registersummarycallback() whose names
1846 # reports registered via scmutil.registersummarycallback() whose names
1853 # are 00-txnreport etc. That way, the caches will be warm when the
1847 # are 00-txnreport etc. That way, the caches will be warm when the
1854 # callbacks run.
1848 # callbacks run.
1855 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1849 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1856 def txnaborthook(tr2):
1850 def txnaborthook(tr2):
1857 """To be run if transaction is aborted
1851 """To be run if transaction is aborted
1858 """
1852 """
1859 reporef().hook('txnabort', throw=False, txnname=desc,
1853 reporef().hook('txnabort', throw=False, txnname=desc,
1860 **pycompat.strkwargs(tr2.hookargs))
1854 **pycompat.strkwargs(tr2.hookargs))
1861 tr.addabort('txnabort-hook', txnaborthook)
1855 tr.addabort('txnabort-hook', txnaborthook)
1862 # avoid eager cache invalidation. in-memory data should be identical
1856 # avoid eager cache invalidation. in-memory data should be identical
1863 # to stored data if the transaction has no error.
1857 # to stored data if the transaction has no error.
1864 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1858 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1865 self._transref = weakref.ref(tr)
1859 self._transref = weakref.ref(tr)
1866 scmutil.registersummarycallback(self, tr, desc)
1860 scmutil.registersummarycallback(self, tr, desc)
1867 return tr
1861 return tr
1868
1862
1869 def _journalfiles(self):
1863 def _journalfiles(self):
1870 return ((self.svfs, 'journal'),
1864 return ((self.svfs, 'journal'),
1871 (self.vfs, 'journal.dirstate'),
1865 (self.vfs, 'journal.dirstate'),
1872 (self.vfs, 'journal.branch'),
1866 (self.vfs, 'journal.branch'),
1873 (self.vfs, 'journal.desc'),
1867 (self.vfs, 'journal.desc'),
1874 (self.vfs, 'journal.bookmarks'),
1868 (self.vfs, 'journal.bookmarks'),
1875 (self.svfs, 'journal.phaseroots'))
1869 (self.svfs, 'journal.phaseroots'))
1876
1870
1877 def undofiles(self):
1871 def undofiles(self):
1878 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1872 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
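undofiles() maps each journal file to its post-transaction counterpart. A sketch of the expected mapping, assuming undoname() (defined elsewhere in this module) replaces the leading 'journal' with 'undo':

    import os

    def undoname(fn):
        # journal.dirstate -> undo.dirstate, journal -> undo, etc.
        base, name = os.path.split(fn)
        assert name.startswith('journal')
        return os.path.join(base, name.replace('journal', 'undo', 1))

    assert undoname('journal.dirstate') == 'undo.dirstate'
    assert undoname('journal.phaseroots') == 'undo.phaseroots'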
1879
1873
1880 @unfilteredmethod
1874 @unfilteredmethod
1881 def _writejournal(self, desc):
1875 def _writejournal(self, desc):
1882 self.dirstate.savebackup(None, 'journal.dirstate')
1876 self.dirstate.savebackup(None, 'journal.dirstate')
1883 narrowspec.savebackup(self, 'journal.narrowspec')
1877 narrowspec.savebackup(self, 'journal.narrowspec')
1884 self.vfs.write("journal.branch",
1878 self.vfs.write("journal.branch",
1885 encoding.fromlocal(self.dirstate.branch()))
1879 encoding.fromlocal(self.dirstate.branch()))
1886 self.vfs.write("journal.desc",
1880 self.vfs.write("journal.desc",
1887 "%d\n%s\n" % (len(self), desc))
1881 "%d\n%s\n" % (len(self), desc))
1888 self.vfs.write("journal.bookmarks",
1882 self.vfs.write("journal.bookmarks",
1889 self.vfs.tryread("bookmarks"))
1883 self.vfs.tryread("bookmarks"))
1890 self.svfs.write("journal.phaseroots",
1884 self.svfs.write("journal.phaseroots",
1891 self.svfs.tryread("phaseroots"))
1885 self.svfs.tryread("phaseroots"))
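journal.desc, written above as "%d\n%s\n", is a two-line file: the repository length, then the transaction description. A sketch of the parse side, mirroring what _rollback() does with the renamed undo.desc file:

    def parsejournaldesc(data):
        # line 1: old repo length; line 2: desc; optional line 3: detail
        args = data.splitlines()
        oldlen, desc = int(args[0]), args[1]
        detail = args[2] if len(args) >= 3 else None
        return oldlen, desc, detail

    assert parsejournaldesc("42\ncommit\n") == (42, 'commit', None)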
1892
1886
1893 def recover(self):
1887 def recover(self):
1894 with self.lock():
1888 with self.lock():
1895 if self.svfs.exists("journal"):
1889 if self.svfs.exists("journal"):
1896 self.ui.status(_("rolling back interrupted transaction\n"))
1890 self.ui.status(_("rolling back interrupted transaction\n"))
1897 vfsmap = {'': self.svfs,
1891 vfsmap = {'': self.svfs,
1898 'plain': self.vfs,}
1892 'plain': self.vfs,}
1899 transaction.rollback(self.svfs, vfsmap, "journal",
1893 transaction.rollback(self.svfs, vfsmap, "journal",
1900 self.ui.warn,
1894 self.ui.warn,
1901 checkambigfiles=_cachedfiles)
1895 checkambigfiles=_cachedfiles)
1902 self.invalidate()
1896 self.invalidate()
1903 return True
1897 return True
1904 else:
1898 else:
1905 self.ui.warn(_("no interrupted transaction available\n"))
1899 self.ui.warn(_("no interrupted transaction available\n"))
1906 return False
1900 return False
1907
1901
1908 def rollback(self, dryrun=False, force=False):
1902 def rollback(self, dryrun=False, force=False):
1909 wlock = lock = dsguard = None
1903 wlock = lock = dsguard = None
1910 try:
1904 try:
1911 wlock = self.wlock()
1905 wlock = self.wlock()
1912 lock = self.lock()
1906 lock = self.lock()
1913 if self.svfs.exists("undo"):
1907 if self.svfs.exists("undo"):
1914 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1908 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1915
1909
1916 return self._rollback(dryrun, force, dsguard)
1910 return self._rollback(dryrun, force, dsguard)
1917 else:
1911 else:
1918 self.ui.warn(_("no rollback information available\n"))
1912 self.ui.warn(_("no rollback information available\n"))
1919 return 1
1913 return 1
1920 finally:
1914 finally:
1921 release(dsguard, lock, wlock)
1915 release(dsguard, lock, wlock)
1922
1916
1923 @unfilteredmethod # Until we get smarter cache management
1917 @unfilteredmethod # Until we get smarter cache management
1924 def _rollback(self, dryrun, force, dsguard):
1918 def _rollback(self, dryrun, force, dsguard):
1925 ui = self.ui
1919 ui = self.ui
1926 try:
1920 try:
1927 args = self.vfs.read('undo.desc').splitlines()
1921 args = self.vfs.read('undo.desc').splitlines()
1928 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1922 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1929 if len(args) >= 3:
1923 if len(args) >= 3:
1930 detail = args[2]
1924 detail = args[2]
1931 oldtip = oldlen - 1
1925 oldtip = oldlen - 1
1932
1926
1933 if detail and ui.verbose:
1927 if detail and ui.verbose:
1934 msg = (_('repository tip rolled back to revision %d'
1928 msg = (_('repository tip rolled back to revision %d'
1935 ' (undo %s: %s)\n')
1929 ' (undo %s: %s)\n')
1936 % (oldtip, desc, detail))
1930 % (oldtip, desc, detail))
1937 else:
1931 else:
1938 msg = (_('repository tip rolled back to revision %d'
1932 msg = (_('repository tip rolled back to revision %d'
1939 ' (undo %s)\n')
1933 ' (undo %s)\n')
1940 % (oldtip, desc))
1934 % (oldtip, desc))
1941 except IOError:
1935 except IOError:
1942 msg = _('rolling back unknown transaction\n')
1936 msg = _('rolling back unknown transaction\n')
1943 desc = None
1937 desc = None
1944
1938
1945 if not force and self['.'] != self['tip'] and desc == 'commit':
1939 if not force and self['.'] != self['tip'] and desc == 'commit':
1946 raise error.Abort(
1940 raise error.Abort(
1947 _('rollback of last commit while not checked out '
1941 _('rollback of last commit while not checked out '
1948 'may lose data'), hint=_('use -f to force'))
1942 'may lose data'), hint=_('use -f to force'))
1949
1943
1950 ui.status(msg)
1944 ui.status(msg)
1951 if dryrun:
1945 if dryrun:
1952 return 0
1946 return 0
1953
1947
1954 parents = self.dirstate.parents()
1948 parents = self.dirstate.parents()
1955 self.destroying()
1949 self.destroying()
1956 vfsmap = {'plain': self.vfs, '': self.svfs}
1950 vfsmap = {'plain': self.vfs, '': self.svfs}
1957 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1951 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1958 checkambigfiles=_cachedfiles)
1952 checkambigfiles=_cachedfiles)
1959 if self.vfs.exists('undo.bookmarks'):
1953 if self.vfs.exists('undo.bookmarks'):
1960 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1954 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1961 if self.svfs.exists('undo.phaseroots'):
1955 if self.svfs.exists('undo.phaseroots'):
1962 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1956 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1963 self.invalidate()
1957 self.invalidate()
1964
1958
1965 parentgone = (parents[0] not in self.changelog.nodemap or
1959 parentgone = (parents[0] not in self.changelog.nodemap or
1966 parents[1] not in self.changelog.nodemap)
1960 parents[1] not in self.changelog.nodemap)
1967 if parentgone:
1961 if parentgone:
1968 # prevent dirstateguard from overwriting already restored one
1962 # prevent dirstateguard from overwriting already restored one
1969 dsguard.close()
1963 dsguard.close()
1970
1964
1971 narrowspec.restorebackup(self, 'undo.narrowspec')
1965 narrowspec.restorebackup(self, 'undo.narrowspec')
1972 self.dirstate.restorebackup(None, 'undo.dirstate')
1966 self.dirstate.restorebackup(None, 'undo.dirstate')
1973 try:
1967 try:
1974 branch = self.vfs.read('undo.branch')
1968 branch = self.vfs.read('undo.branch')
1975 self.dirstate.setbranch(encoding.tolocal(branch))
1969 self.dirstate.setbranch(encoding.tolocal(branch))
1976 except IOError:
1970 except IOError:
1977 ui.warn(_('named branch could not be reset: '
1971 ui.warn(_('named branch could not be reset: '
1978 'current branch is still \'%s\'\n')
1972 'current branch is still \'%s\'\n')
1979 % self.dirstate.branch())
1973 % self.dirstate.branch())
1980
1974
1981 parents = tuple([p.rev() for p in self[None].parents()])
1975 parents = tuple([p.rev() for p in self[None].parents()])
1982 if len(parents) > 1:
1976 if len(parents) > 1:
1983 ui.status(_('working directory now based on '
1977 ui.status(_('working directory now based on '
1984 'revisions %d and %d\n') % parents)
1978 'revisions %d and %d\n') % parents)
1985 else:
1979 else:
1986 ui.status(_('working directory now based on '
1980 ui.status(_('working directory now based on '
1987 'revision %d\n') % parents)
1981 'revision %d\n') % parents)
1988 mergemod.mergestate.clean(self, self['.'].node())
1982 mergemod.mergestate.clean(self, self['.'].node())
1989
1983
1990 # TODO: if we know which new heads may result from this rollback, pass
1984 # TODO: if we know which new heads may result from this rollback, pass
1991 # them to destroy(), which will prevent the branchhead cache from being
1985 # them to destroy(), which will prevent the branchhead cache from being
1992 # invalidated.
1986 # invalidated.
1993 self.destroyed()
1987 self.destroyed()
1994 return 0
1988 return 0
1995
1989
1996 def _buildcacheupdater(self, newtransaction):
1990 def _buildcacheupdater(self, newtransaction):
1997 """called during transaction to build the callback updating cache
1991 """called during transaction to build the callback updating cache
1998
1992
1999 Lives on the repository to help extension who might want to augment
1993 Lives on the repository to help extension who might want to augment
2000 this logic. For this purpose, the created transaction is passed to the
1994 this logic. For this purpose, the created transaction is passed to the
2001 method.
1995 method.
2002 """
1996 """
2003 # we must avoid cyclic reference between repo and transaction.
1997 # we must avoid cyclic reference between repo and transaction.
2004 reporef = weakref.ref(self)
1998 reporef = weakref.ref(self)
2005 def updater(tr):
1999 def updater(tr):
2006 repo = reporef()
2000 repo = reporef()
2007 repo.updatecaches(tr)
2001 repo.updatecaches(tr)
2008 return updater
2002 return updater
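The weakref.ref() indirection here (and in transaction() above) keeps the returned closure from creating a repo -> transaction -> callback -> repo reference cycle. A self-contained sketch of the pattern:

    import weakref

    class Repo(object):
        def updatecaches(self, tr):
            print('warming caches after %s' % tr)

    def buildupdater(repo):
        reporef = weakref.ref(repo)    # the closure holds only a weak ref
        def updater(tr):
            repo = reporef()           # None if the repo was garbage collected
            if repo is not None:
                repo.updatecaches(tr)
        return updater

    repo = Repo()
    buildupdater(repo)('some-transaction')  # prints the warming message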
2009
2003
2010 @unfilteredmethod
2004 @unfilteredmethod
2011 def updatecaches(self, tr=None, full=False):
2005 def updatecaches(self, tr=None, full=False):
2012 """warm appropriate caches
2006 """warm appropriate caches
2013
2007
2014 If this function is called after a transaction closed, the transaction
2008 If this function is called after a transaction closed, the transaction
2015 will be available in the 'tr' argument. This can be used to selectively
2009 will be available in the 'tr' argument. This can be used to selectively
2016 update caches relevant to the changes in that transaction.
2010 update caches relevant to the changes in that transaction.
2017
2011
2018 If 'full' is set, make sure all caches the function knows about have
2012 If 'full' is set, make sure all caches the function knows about have
2019 up-to-date data, even the ones usually loaded more lazily.
2013 up-to-date data, even the ones usually loaded more lazily.
2020 """
2014 """
2021 if tr is not None and tr.hookargs.get('source') == 'strip':
2015 if tr is not None and tr.hookargs.get('source') == 'strip':
2022 # During strip, many caches are invalid but
2016 # During strip, many caches are invalid but
2023 # later call to `destroyed` will refresh them.
2017 # later call to `destroyed` will refresh them.
2024 return
2018 return
2025
2019
2026 if tr is None or tr.changes['origrepolen'] < len(self):
2020 if tr is None or tr.changes['origrepolen'] < len(self):
2027 # updating the unfiltered branchmap should refresh all the others
2021 # updating the unfiltered branchmap should refresh all the others
2028 self.ui.debug('updating the branch cache\n')
2022 self.ui.debug('updating the branch cache\n')
2029 branchmap.updatecache(self.filtered('served'))
2023 branchmap.updatecache(self.filtered('served'))
2030
2024
2031 if full:
2025 if full:
2032 rbc = self.revbranchcache()
2026 rbc = self.revbranchcache()
2033 for r in self.changelog:
2027 for r in self.changelog:
2034 rbc.branchinfo(r)
2028 rbc.branchinfo(r)
2035 rbc.write()
2029 rbc.write()
2036
2030
2037 # ensure the working copy parents are in the manifestfulltextcache
2031 # ensure the working copy parents are in the manifestfulltextcache
2038 for ctx in self['.'].parents():
2032 for ctx in self['.'].parents():
2039 ctx.manifest() # accessing the manifest is enough
2033 ctx.manifest() # accessing the manifest is enough
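In practice a plain post-transaction call only refreshes the branch cache, while full=True also fills the rev-branch cache and the manifest fulltext cache. A usage sketch, with the repository path as a placeholder:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    repo.updatecaches(full=True)  # warm every cache this method knows about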
2040
2034
2041 def invalidatecaches(self):
2035 def invalidatecaches(self):
2042
2036
2043 if '_tagscache' in vars(self):
2037 if '_tagscache' in vars(self):
2044 # can't use delattr on proxy
2038 # can't use delattr on proxy
2045 del self.__dict__['_tagscache']
2039 del self.__dict__['_tagscache']
2046
2040
2047 self.unfiltered()._branchcaches.clear()
2041 self.unfiltered()._branchcaches.clear()
2048 self.invalidatevolatilesets()
2042 self.invalidatevolatilesets()
2049 self._sparsesignaturecache.clear()
2043 self._sparsesignaturecache.clear()
2050
2044
2051 def invalidatevolatilesets(self):
2045 def invalidatevolatilesets(self):
2052 self.filteredrevcache.clear()
2046 self.filteredrevcache.clear()
2053 obsolete.clearobscaches(self)
2047 obsolete.clearobscaches(self)
2054
2048
2055 def invalidatedirstate(self):
2049 def invalidatedirstate(self):
2056 '''Invalidates the dirstate, causing the next call to dirstate
2050 '''Invalidates the dirstate, causing the next call to dirstate
2057 to check if it was modified since the last time it was read,
2051 to check if it was modified since the last time it was read,
2058 rereading it if it has.
2052 rereading it if it has.
2059
2053
2060 This is different from dirstate.invalidate() in that it doesn't always
2054 This is different from dirstate.invalidate() in that it doesn't always
2061 reread the dirstate. Use dirstate.invalidate() if you want to
2055 reread the dirstate. Use dirstate.invalidate() if you want to
2062 explicitly read the dirstate again (i.e. restoring it to a previous
2056 explicitly read the dirstate again (i.e. restoring it to a previous
2063 known good state).'''
2057 known good state).'''
2064 if hasunfilteredcache(self, 'dirstate'):
2058 if hasunfilteredcache(self, 'dirstate'):
2065 for k in self.dirstate._filecache:
2059 for k in self.dirstate._filecache:
2066 try:
2060 try:
2067 delattr(self.dirstate, k)
2061 delattr(self.dirstate, k)
2068 except AttributeError:
2062 except AttributeError:
2069 pass
2063 pass
2070 delattr(self.unfiltered(), 'dirstate')
2064 delattr(self.unfiltered(), 'dirstate')
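The try/delattr/except dance above is the usual way to drop a lazily computed attribute so the next access recomputes it. A self-contained sketch with an ordinary cached property:

    class Cached(object):
        @property
        def expensive(self):
            if not hasattr(self, '_expensive'):
                self._expensive = object()   # stand-in for a costly computation
            return self._expensive

        def invalidate(self):
            try:
                delattr(self, '_expensive')  # drop the cache if present...
            except AttributeError:
                pass                         # ...and ignore if never computed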
2071
2065
2072 def invalidate(self, clearfilecache=False):
2066 def invalidate(self, clearfilecache=False):
2073 '''Invalidates both store and non-store parts other than dirstate
2067 '''Invalidates both store and non-store parts other than dirstate
2074
2068
2075 If a transaction is running, invalidation of store is omitted,
2069 If a transaction is running, invalidation of store is omitted,
2076 because discarding in-memory changes might cause inconsistency
2070 because discarding in-memory changes might cause inconsistency
2077 (e.g. an incomplete fncache causes unintentional failures, but
2071 (e.g. an incomplete fncache causes unintentional failures, but
2078 a redundant one doesn't).
2072 a redundant one doesn't).
2079 '''
2073 '''
2080 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2074 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2081 for k in list(self._filecache.keys()):
2075 for k in list(self._filecache.keys()):
2082 # dirstate is invalidated separately in invalidatedirstate()
2076 # dirstate is invalidated separately in invalidatedirstate()
2083 if k == 'dirstate':
2077 if k == 'dirstate':
2084 continue
2078 continue
2085 if (k == 'changelog' and
2079 if (k == 'changelog' and
2086 self.currenttransaction() and
2080 self.currenttransaction() and
2087 self.changelog._delayed):
2081 self.changelog._delayed):
2088 # The changelog object may store unwritten revisions. We don't
2082 # The changelog object may store unwritten revisions. We don't
2089 # want to lose them.
2083 # want to lose them.
2090 # TODO: Solve the problem instead of working around it.
2084 # TODO: Solve the problem instead of working around it.
2091 continue
2085 continue
2092
2086
2093 if clearfilecache:
2087 if clearfilecache:
2094 del self._filecache[k]
2088 del self._filecache[k]
2095 try:
2089 try:
2096 delattr(unfiltered, k)
2090 delattr(unfiltered, k)
2097 except AttributeError:
2091 except AttributeError:
2098 pass
2092 pass
2099 self.invalidatecaches()
2093 self.invalidatecaches()
2100 if not self.currenttransaction():
2094 if not self.currenttransaction():
2101 # TODO: Changing contents of store outside transaction
2095 # TODO: Changing contents of store outside transaction
2102 # causes inconsistency. We should make in-memory store
2096 # causes inconsistency. We should make in-memory store
2103 # changes detectable, and abort if changed.
2097 # changes detectable, and abort if changed.
2104 self.store.invalidatecaches()
2098 self.store.invalidatecaches()
2105
2099
2106 def invalidateall(self):
2100 def invalidateall(self):
2107 '''Fully invalidates both store and non-store parts, causing the
2101 '''Fully invalidates both store and non-store parts, causing the
2108 subsequent operation to reread any outside changes.'''
2102 subsequent operation to reread any outside changes.'''
2109 # extensions should hook this to invalidate their caches
2103 # extensions should hook this to invalidate their caches
2110 self.invalidate()
2104 self.invalidate()
2111 self.invalidatedirstate()
2105 self.invalidatedirstate()
2112
2106
2113 @unfilteredmethod
2107 @unfilteredmethod
2114 def _refreshfilecachestats(self, tr):
2108 def _refreshfilecachestats(self, tr):
2115 """Reload stats of cached files so that they are flagged as valid"""
2109 """Reload stats of cached files so that they are flagged as valid"""
2116 for k, ce in self._filecache.items():
2110 for k, ce in self._filecache.items():
2117 k = pycompat.sysstr(k)
2111 k = pycompat.sysstr(k)
2118 if k == r'dirstate' or k not in self.__dict__:
2112 if k == r'dirstate' or k not in self.__dict__:
2119 continue
2113 continue
2120 ce.refresh()
2114 ce.refresh()
2121
2115
2122 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2116 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2123 inheritchecker=None, parentenvvar=None):
2117 inheritchecker=None, parentenvvar=None):
2124 parentlock = None
2118 parentlock = None
2125 # the contents of parentenvvar are used by the underlying lock to
2119 # the contents of parentenvvar are used by the underlying lock to
2126 # determine whether it can be inherited
2120 # determine whether it can be inherited
2127 if parentenvvar is not None:
2121 if parentenvvar is not None:
2128 parentlock = encoding.environ.get(parentenvvar)
2122 parentlock = encoding.environ.get(parentenvvar)
2129
2123
2130 timeout = 0
2124 timeout = 0
2131 warntimeout = 0
2125 warntimeout = 0
2132 if wait:
2126 if wait:
2133 timeout = self.ui.configint("ui", "timeout")
2127 timeout = self.ui.configint("ui", "timeout")
2134 warntimeout = self.ui.configint("ui", "timeout.warn")
2128 warntimeout = self.ui.configint("ui", "timeout.warn")
2135 # internal config: ui.signal-safe-lock
2129 # internal config: ui.signal-safe-lock
2136 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2130 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2137
2131
2138 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2132 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2139 releasefn=releasefn,
2133 releasefn=releasefn,
2140 acquirefn=acquirefn, desc=desc,
2134 acquirefn=acquirefn, desc=desc,
2141 inheritchecker=inheritchecker,
2135 inheritchecker=inheritchecker,
2142 parentlock=parentlock,
2136 parentlock=parentlock,
2143 signalsafe=signalsafe)
2137 signalsafe=signalsafe)
2144 return l
2138 return l
2145
2139
2146 def _afterlock(self, callback):
2140 def _afterlock(self, callback):
2147 """add a callback to be run when the repository is fully unlocked
2141 """add a callback to be run when the repository is fully unlocked
2148
2142
2149 The callback will be executed when the outermost lock is released
2143 The callback will be executed when the outermost lock is released
2150 (with wlock being higher level than 'lock')."""
2144 (with wlock being higher level than 'lock')."""
2151 for ref in (self._wlockref, self._lockref):
2145 for ref in (self._wlockref, self._lockref):
2152 l = ref and ref()
2146 l = ref and ref()
2153 if l and l.held:
2147 if l and l.held:
2154 l.postrelease.append(callback)
2148 l.postrelease.append(callback)
2155 break
2149 break
2156 else: # no lock has been found.
2150 else: # no lock has been found.
2157 callback()
2151 callback()
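The for/else above is load-bearing: the else clause runs only if the loop finished without break, i.e. neither lock reference is currently held, so the callback must run immediately. A minimal sketch of the same control flow:

    def runafterlock(locks, callback):
        for l in locks:
            if l is not None and l.held:
                l.postrelease.append(callback)  # defer until lock release
                break
        else:                                   # no break: nothing is locked
            callback()                          # so run right away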
2158
2152
2159 def lock(self, wait=True):
2153 def lock(self, wait=True):
2160 '''Lock the repository store (.hg/store) and return a weak reference
2154 '''Lock the repository store (.hg/store) and return a weak reference
2161 to the lock. Use this before modifying the store (e.g. committing or
2155 to the lock. Use this before modifying the store (e.g. committing or
2162 stripping). If you are opening a transaction, get a lock as well.
2156 stripping). If you are opening a transaction, get a lock as well.
2163
2157
2164 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2158 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2165 'wlock' first to avoid a deadlock hazard.'''
2159 'wlock' first to avoid a deadlock hazard.'''
2166 l = self._currentlock(self._lockref)
2160 l = self._currentlock(self._lockref)
2167 if l is not None:
2161 if l is not None:
2168 l.lock()
2162 l.lock()
2169 return l
2163 return l
2170
2164
2171 l = self._lock(self.svfs, "lock", wait, None,
2165 l = self._lock(self.svfs, "lock", wait, None,
2172 self.invalidate, _('repository %s') % self.origroot)
2166 self.invalidate, _('repository %s') % self.origroot)
2173 self._lockref = weakref.ref(l)
2167 self._lockref = weakref.ref(l)
2174 return l
2168 return l
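Combined with the ordering rule in the docstring, a typical write path takes wlock, then lock, then opens a transaction. A sketch, assuming an already-open repo and that lock and transaction objects support the context-manager protocol, as they do in modern Mercurial:

    with repo.wlock():            # non-store lock first...
        with repo.lock():         # ...then the store lock, avoiding deadlocks
            with repo.transaction(b'my-operation') as tr:
                pass              # mutate the store here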
2175
2169
2176 def _wlockchecktransaction(self):
2170 def _wlockchecktransaction(self):
2177 if self.currenttransaction() is not None:
2171 if self.currenttransaction() is not None:
2178 raise error.LockInheritanceContractViolation(
2172 raise error.LockInheritanceContractViolation(
2179 'wlock cannot be inherited in the middle of a transaction')
2173 'wlock cannot be inherited in the middle of a transaction')
2180
2174
2181 def wlock(self, wait=True):
2175 def wlock(self, wait=True):
2182 '''Lock the non-store parts of the repository (everything under
2176 '''Lock the non-store parts of the repository (everything under
2183 .hg except .hg/store) and return a weak reference to the lock.
2177 .hg except .hg/store) and return a weak reference to the lock.
2184
2178
2185 Use this before modifying files in .hg.
2179 Use this before modifying files in .hg.
2186
2180
2187 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2181 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2188 'wlock' first to avoid a deadlock hazard.'''
2182 'wlock' first to avoid a deadlock hazard.'''
2189 l = self._wlockref and self._wlockref()
2183 l = self._wlockref and self._wlockref()
2190 if l is not None and l.held:
2184 if l is not None and l.held:
2191 l.lock()
2185 l.lock()
2192 return l
2186 return l
2193
2187
2194 # We do not need to check for non-waiting lock acquisition. Such an
2188 # We do not need to check for non-waiting lock acquisition. Such an
2195 # acquisition would not cause a deadlock, as it would just fail.
2189 # acquisition would not cause a deadlock, as it would just fail.
2196 if wait and (self.ui.configbool('devel', 'all-warnings')
2190 if wait and (self.ui.configbool('devel', 'all-warnings')
2197 or self.ui.configbool('devel', 'check-locks')):
2191 or self.ui.configbool('devel', 'check-locks')):
2198 if self._currentlock(self._lockref) is not None:
2192 if self._currentlock(self._lockref) is not None:
2199 self.ui.develwarn('"wlock" acquired after "lock"')
2193 self.ui.develwarn('"wlock" acquired after "lock"')
2200
2194
2201 def unlock():
2195 def unlock():
2202 if self.dirstate.pendingparentchange():
2196 if self.dirstate.pendingparentchange():
2203 self.dirstate.invalidate()
2197 self.dirstate.invalidate()
2204 else:
2198 else:
2205 self.dirstate.write(None)
2199 self.dirstate.write(None)
2206
2200
2207 self._filecache['dirstate'].refresh()
2201 self._filecache['dirstate'].refresh()
2208
2202
2209 l = self._lock(self.vfs, "wlock", wait, unlock,
2203 l = self._lock(self.vfs, "wlock", wait, unlock,
2210 self.invalidatedirstate, _('working directory of %s') %
2204 self.invalidatedirstate, _('working directory of %s') %
2211 self.origroot,
2205 self.origroot,
2212 inheritchecker=self._wlockchecktransaction,
2206 inheritchecker=self._wlockchecktransaction,
2213 parentenvvar='HG_WLOCK_LOCKER')
2207 parentenvvar='HG_WLOCK_LOCKER')
2214 self._wlockref = weakref.ref(l)
2208 self._wlockref = weakref.ref(l)
2215 return l
2209 return l
2216
2210
2217 def _currentlock(self, lockref):
2211 def _currentlock(self, lockref):
2218 """Returns the lock if it's held, or None if it's not."""
2212 """Returns the lock if it's held, or None if it's not."""
2219 if lockref is None:
2213 if lockref is None:
2220 return None
2214 return None
2221 l = lockref()
2215 l = lockref()
2222 if l is None or not l.held:
2216 if l is None or not l.held:
2223 return None
2217 return None
2224 return l
2218 return l
2225
2219
2226 def currentwlock(self):
2220 def currentwlock(self):
2227 """Returns the wlock if it's held, or None if it's not."""
2221 """Returns the wlock if it's held, or None if it's not."""
2228 return self._currentlock(self._wlockref)
2222 return self._currentlock(self._wlockref)
2229
2223
2230 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2224 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2231 """
2225 """
2232 commit an individual file as part of a larger transaction
2226 commit an individual file as part of a larger transaction
2233 """
2227 """
2234
2228
2235 fname = fctx.path()
2229 fname = fctx.path()
2236 fparent1 = manifest1.get(fname, nullid)
2230 fparent1 = manifest1.get(fname, nullid)
2237 fparent2 = manifest2.get(fname, nullid)
2231 fparent2 = manifest2.get(fname, nullid)
2238 if isinstance(fctx, context.filectx):
2232 if isinstance(fctx, context.filectx):
2239 node = fctx.filenode()
2233 node = fctx.filenode()
2240 if node in [fparent1, fparent2]:
2234 if node in [fparent1, fparent2]:
2241 self.ui.debug('reusing %s filelog entry\n' % fname)
2235 self.ui.debug('reusing %s filelog entry\n' % fname)
2242 if manifest1.flags(fname) != fctx.flags():
2236 if manifest1.flags(fname) != fctx.flags():
2243 changelist.append(fname)
2237 changelist.append(fname)
2244 return node
2238 return node
2245
2239
2246 flog = self.file(fname)
2240 flog = self.file(fname)
2247 meta = {}
2241 meta = {}
2248 copy = fctx.renamed()
2242 copy = fctx.renamed()
2249 if copy and copy[0] != fname:
2243 if copy and copy[0] != fname:
2250 # Mark the new revision of this file as a copy of another
2244 # Mark the new revision of this file as a copy of another
2251 # file. This copy data will effectively act as a parent
2245 # file. This copy data will effectively act as a parent
2252 # of this new revision. If this is a merge, the first
2246 # of this new revision. If this is a merge, the first
2253 # parent will be the nullid (meaning "look up the copy data")
2247 # parent will be the nullid (meaning "look up the copy data")
2254 # and the second one will be the other parent. For example:
2248 # and the second one will be the other parent. For example:
2255 #
2249 #
2256 # 0 --- 1 --- 3 rev1 changes file foo
2250 # 0 --- 1 --- 3 rev1 changes file foo
2257 # \ / rev2 renames foo to bar and changes it
2251 # \ / rev2 renames foo to bar and changes it
2258 # \- 2 -/ rev3 should have bar with all changes and
2252 # \- 2 -/ rev3 should have bar with all changes and
2259 # should record that bar descends from
2253 # should record that bar descends from
2260 # bar in rev2 and foo in rev1
2254 # bar in rev2 and foo in rev1
2261 #
2255 #
2262 # this allows this merge to succeed:
2256 # this allows this merge to succeed:
2263 #
2257 #
2264 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2258 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2265 # \ / merging rev3 and rev4 should use bar@rev2
2259 # \ / merging rev3 and rev4 should use bar@rev2
2266 # \- 2 --- 4 as the merge base
2260 # \- 2 --- 4 as the merge base
2267 #
2261 #
2268
2262
2269 cfname = copy[0]
2263 cfname = copy[0]
2270 crev = manifest1.get(cfname)
2264 crev = manifest1.get(cfname)
2271 newfparent = fparent2
2265 newfparent = fparent2
2272
2266
2273 if manifest2: # branch merge
2267 if manifest2: # branch merge
2274 if fparent2 == nullid or crev is None: # copied on remote side
2268 if fparent2 == nullid or crev is None: # copied on remote side
2275 if cfname in manifest2:
2269 if cfname in manifest2:
2276 crev = manifest2[cfname]
2270 crev = manifest2[cfname]
2277 newfparent = fparent1
2271 newfparent = fparent1
2278
2272
2279 # Here, we used to search backwards through history to try to find
2273 # Here, we used to search backwards through history to try to find
2280 # where the file copy came from if the source of a copy was not in
2274 # where the file copy came from if the source of a copy was not in
2281 # the parent directory. However, this doesn't actually make sense to
2275 # the parent directory. However, this doesn't actually make sense to
2282 # do (what does a copy from something not in your working copy even
2276 # do (what does a copy from something not in your working copy even
2283 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2277 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2284 # the user that copy information was dropped, so if they didn't
2278 # the user that copy information was dropped, so if they didn't
2285 # expect this outcome it can be fixed, but this is the correct
2279 # expect this outcome it can be fixed, but this is the correct
2286 # behavior in this circumstance.
2280 # behavior in this circumstance.
2287
2281
2288 if crev:
2282 if crev:
2289 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2283 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2290 meta["copy"] = cfname
2284 meta["copy"] = cfname
2291 meta["copyrev"] = hex(crev)
2285 meta["copyrev"] = hex(crev)
2292 fparent1, fparent2 = nullid, newfparent
2286 fparent1, fparent2 = nullid, newfparent
2293 else:
2287 else:
2294 self.ui.warn(_("warning: can't find ancestor for '%s' "
2288 self.ui.warn(_("warning: can't find ancestor for '%s' "
2295 "copied from '%s'!\n") % (fname, cfname))
2289 "copied from '%s'!\n") % (fname, cfname))
2296
2290
2297 elif fparent1 == nullid:
2291 elif fparent1 == nullid:
2298 fparent1, fparent2 = fparent2, nullid
2292 fparent1, fparent2 = fparent2, nullid
2299 elif fparent2 != nullid:
2293 elif fparent2 != nullid:
2300 # is one parent an ancestor of the other?
2294 # is one parent an ancestor of the other?
2301 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2295 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2302 if fparent1 in fparentancestors:
2296 if fparent1 in fparentancestors:
2303 fparent1, fparent2 = fparent2, nullid
2297 fparent1, fparent2 = fparent2, nullid
2304 elif fparent2 in fparentancestors:
2298 elif fparent2 in fparentancestors:
2305 fparent2 = nullid
2299 fparent2 = nullid
2306
2300
2307 # is the file changed?
2301 # is the file changed?
2308 text = fctx.data()
2302 text = fctx.data()
2309 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2303 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2310 changelist.append(fname)
2304 changelist.append(fname)
2311 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2305 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2312 # are just the flags changed during merge?
2306 # are just the flags changed during merge?
2313 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2307 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2314 changelist.append(fname)
2308 changelist.append(fname)
2315
2309
2316 return fparent1
2310 return fparent1
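For a rename, then, the new filelog revision records its provenance in revision metadata rather than in its parents: the first parent becomes nullid ("look up the copy data") and the copy source travels in the 'copy'/'copyrev' keys. A sketch of the metadata built for the rev2 rename in the diagram above, with a placeholder node:

    meta = {
        "copy": "foo",         # path this file was copied/renamed from
        "copyrev": "ab" * 20,  # hex filenode of foo in rev1 (placeholder)
    }
    # with copy metadata present, the parents are then rewritten so that
    # fparent1 == nullid and fparent2 carries the other parent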
2317
2311
2318 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2312 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2319 """check for commit arguments that aren't committable"""
2313 """check for commit arguments that aren't committable"""
2320 if match.isexact() or match.prefix():
2314 if match.isexact() or match.prefix():
2321 matched = set(status.modified + status.added + status.removed)
2315 matched = set(status.modified + status.added + status.removed)
2322
2316
2323 for f in match.files():
2317 for f in match.files():
2324 f = self.dirstate.normalize(f)
2318 f = self.dirstate.normalize(f)
2325 if f == '.' or f in matched or f in wctx.substate:
2319 if f == '.' or f in matched or f in wctx.substate:
2326 continue
2320 continue
2327 if f in status.deleted:
2321 if f in status.deleted:
2328 fail(f, _('file not found!'))
2322 fail(f, _('file not found!'))
2329 if f in vdirs: # visited directory
2323 if f in vdirs: # visited directory
2330 d = f + '/'
2324 d = f + '/'
2331 for mf in matched:
2325 for mf in matched:
2332 if mf.startswith(d):
2326 if mf.startswith(d):
2333 break
2327 break
2334 else:
2328 else:
2335 fail(f, _("no match under directory!"))
2329 fail(f, _("no match under directory!"))
2336 elif f not in self.dirstate:
2330 elif f not in self.dirstate:
2337 fail(f, _("file not tracked!"))
2331 fail(f, _("file not tracked!"))
2338
2332
2339 @unfilteredmethod
2333 @unfilteredmethod
2340 def commit(self, text="", user=None, date=None, match=None, force=False,
2334 def commit(self, text="", user=None, date=None, match=None, force=False,
2341 editor=False, extra=None):
2335 editor=False, extra=None):
2342 """Add a new revision to current repository.
2336 """Add a new revision to current repository.
2343
2337
2344 Revision information is gathered from the working directory,
2338 Revision information is gathered from the working directory,
2345 match can be used to filter the committed files. If editor is
2339 match can be used to filter the committed files. If editor is
2346 supplied, it is called to get a commit message.
2340 supplied, it is called to get a commit message.
2347 """
2341 """
2348 if extra is None:
2342 if extra is None:
2349 extra = {}
2343 extra = {}
2350
2344
2351 def fail(f, msg):
2345 def fail(f, msg):
2352 raise error.Abort('%s: %s' % (f, msg))
2346 raise error.Abort('%s: %s' % (f, msg))
2353
2347
2354 if not match:
2348 if not match:
2355 match = matchmod.always(self.root, '')
2349 match = matchmod.always(self.root, '')
2356
2350
2357 if not force:
2351 if not force:
2358 vdirs = []
2352 vdirs = []
2359 match.explicitdir = vdirs.append
2353 match.explicitdir = vdirs.append
2360 match.bad = fail
2354 match.bad = fail
2361
2355
2362 wlock = lock = tr = None
2356 wlock = lock = tr = None
2363 try:
2357 try:
2364 wlock = self.wlock()
2358 wlock = self.wlock()
2365 lock = self.lock() # for recent changelog (see issue4368)
2359 lock = self.lock() # for recent changelog (see issue4368)
2366
2360
2367 wctx = self[None]
2361 wctx = self[None]
2368 merge = len(wctx.parents()) > 1
2362 merge = len(wctx.parents()) > 1
2369
2363
2370 if not force and merge and not match.always():
2364 if not force and merge and not match.always():
2371 raise error.Abort(_('cannot partially commit a merge '
2365 raise error.Abort(_('cannot partially commit a merge '
2372 '(do not specify files or patterns)'))
2366 '(do not specify files or patterns)'))
2373
2367
2374 status = self.status(match=match, clean=force)
2368 status = self.status(match=match, clean=force)
2375 if force:
2369 if force:
2376 status.modified.extend(status.clean) # mq may commit clean files
2370 status.modified.extend(status.clean) # mq may commit clean files
2377
2371
2378 # check subrepos
2372 # check subrepos
2379 subs, commitsubs, newstate = subrepoutil.precommit(
2373 subs, commitsubs, newstate = subrepoutil.precommit(
2380 self.ui, wctx, status, match, force=force)
2374 self.ui, wctx, status, match, force=force)
2381
2375
2382 # make sure all explicit patterns are matched
2376 # make sure all explicit patterns are matched
2383 if not force:
2377 if not force:
2384 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2378 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2385
2379
2386 cctx = context.workingcommitctx(self, status,
2380 cctx = context.workingcommitctx(self, status,
2387 text, user, date, extra)
2381 text, user, date, extra)
2388
2382
2389 # internal config: ui.allowemptycommit
2383 # internal config: ui.allowemptycommit
2390 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2384 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2391 or extra.get('close') or merge or cctx.files()
2385 or extra.get('close') or merge or cctx.files()
2392 or self.ui.configbool('ui', 'allowemptycommit'))
2386 or self.ui.configbool('ui', 'allowemptycommit'))
2393 if not allowemptycommit:
2387 if not allowemptycommit:
2394 return None
2388 return None
2395
2389
2396 if merge and cctx.deleted():
2390 if merge and cctx.deleted():
2397 raise error.Abort(_("cannot commit merge with missing files"))
2391 raise error.Abort(_("cannot commit merge with missing files"))
2398
2392
2399 ms = mergemod.mergestate.read(self)
2393 ms = mergemod.mergestate.read(self)
2400 mergeutil.checkunresolved(ms)
2394 mergeutil.checkunresolved(ms)
2401
2395
2402 if editor:
2396 if editor:
2403 cctx._text = editor(self, cctx, subs)
2397 cctx._text = editor(self, cctx, subs)
2404 edited = (text != cctx._text)
2398 edited = (text != cctx._text)
2405
2399
2406 # Save commit message in case this transaction gets rolled back
2400 # Save commit message in case this transaction gets rolled back
2407 # (e.g. by a pretxncommit hook). Leave the content alone on
2401 # (e.g. by a pretxncommit hook). Leave the content alone on
2408 # the assumption that the user will use the same editor again.
2402 # the assumption that the user will use the same editor again.
2409 msgfn = self.savecommitmessage(cctx._text)
2403 msgfn = self.savecommitmessage(cctx._text)
2410
2404
2411 # commit subs and write new state
2405 # commit subs and write new state
2412 if subs:
2406 if subs:
2413 for s in sorted(commitsubs):
2407 for s in sorted(commitsubs):
2414 sub = wctx.sub(s)
2408 sub = wctx.sub(s)
2415 self.ui.status(_('committing subrepository %s\n') %
2409 self.ui.status(_('committing subrepository %s\n') %
2416 subrepoutil.subrelpath(sub))
2410 subrepoutil.subrelpath(sub))
2417 sr = sub.commit(cctx._text, user, date)
2411 sr = sub.commit(cctx._text, user, date)
2418 newstate[s] = (newstate[s][0], sr)
2412 newstate[s] = (newstate[s][0], sr)
2419 subrepoutil.writestate(self, newstate)
2413 subrepoutil.writestate(self, newstate)
2420
2414
2421 p1, p2 = self.dirstate.parents()
2415 p1, p2 = self.dirstate.parents()
2422 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2416 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2423 try:
2417 try:
2424 self.hook("precommit", throw=True, parent1=hookp1,
2418 self.hook("precommit", throw=True, parent1=hookp1,
2425 parent2=hookp2)
2419 parent2=hookp2)
2426 tr = self.transaction('commit')
2420 tr = self.transaction('commit')
2427 ret = self.commitctx(cctx, True)
2421 ret = self.commitctx(cctx, True)
2428 except: # re-raises
2422 except: # re-raises
2429 if edited:
2423 if edited:
2430 self.ui.write(
2424 self.ui.write(
2431 _('note: commit message saved in %s\n') % msgfn)
2425 _('note: commit message saved in %s\n') % msgfn)
2432 raise
2426 raise
2433 # update bookmarks, dirstate and mergestate
2427 # update bookmarks, dirstate and mergestate
2434 bookmarks.update(self, [p1, p2], ret)
2428 bookmarks.update(self, [p1, p2], ret)
2435 cctx.markcommitted(ret)
2429 cctx.markcommitted(ret)
2436 ms.reset()
2430 ms.reset()
2437 tr.close()
2431 tr.close()
2438
2432
2439 finally:
2433 finally:
2440 lockmod.release(tr, lock, wlock)
2434 lockmod.release(tr, lock, wlock)
2441
2435
2442 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2436 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2443 # hack for commands that use a temporary commit (eg: histedit):
2437 # hack for commands that use a temporary commit (eg: histedit):
2444 # the temporary commit may have been stripped before the hook runs
2438 # the temporary commit may have been stripped before the hook runs
2445 if self.changelog.hasnode(ret):
2439 if self.changelog.hasnode(ret):
2446 self.hook("commit", node=node, parent1=parent1,
2440 self.hook("commit", node=node, parent1=parent1,
2447 parent2=parent2)
2441 parent2=parent2)
2448 self._afterlock(commithook)
2442 self._afterlock(commithook)
2449 return ret
2443 return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
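
    # Illustrative sketch (hypothetical, not part of this file): commitctx()
    # is normally reached via commit() above, but an in-memory changeset can
    # also be committed directly through context.memctx, roughly:
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, memctx, path, b'new content\n')
    #   mctx = context.memctx(repo, (p1node, None), b'commit message',
    #                         [b'a.txt'], getfilectx, user=b'an author')
    #   newnode = repo.commitctx(mctx)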

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
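
    # Sketch of the expected call sequence (illustrative, derived from the
    # docstrings above; the actual strip internals are elided):
    #
    #   with repo.lock():
    #       repo.destroying()   # flush pending in-memory state (phasecache)
    #       ...                 # actually remove the revlog data
    #       repo.destroyed()    # filter stale nodes, refresh caches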

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
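
    # Illustrative usage (a sketch): compare the working directory against
    # '.'; the result is a status object whose fields are lists of paths:
    #
    #   st = repo.status(unknown=True)
    #   st.modified, st.added, st.removed   # tracked changes
    #   st.unknown                          # untracked files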

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
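
    # Hypothetical extension usage (a sketch, following the contract in the
    # docstring above; the callback name is made up):
    #
    #   def fixup(wctx, status):
    #       # runs with the wlock held; reach the dirstate via the wctx
    #       wctx.repo().ui.debug(b'%d modified\n' % len(status.modified))
    #   repo.addpostdsstatus(fixup)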

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
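
    # Illustrative usage (sketch): newest-first head nodes of a named branch,
    # including heads marked as closed:
    #
    #   nodes = repo.branchheads(b'default', closed=True)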

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
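
    # Worked trace (illustrative): for a linear history a <- b <- c <- d <- e,
    # between([(e, a)]) walks first parents from 'e' and records the nodes at
    # distances 1, 2, 4, ... from the top, so it returns [[d, c]]. The old
    # wire protocol used this exponential sampling to binary-search ancestry.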

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
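
    # Illustrative usage (sketch): the 'bookmarks' pushkey namespace, with an
    # empty old value to create a new entry:
    #
    #   ok = repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
    #   repo.listkeys(b'bookmarks')   # -> {bookmark name: hex node, ...}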

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
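
# Illustrative sketch (the triple shown is hypothetical): the transaction
# machinery calls the returned closure when a transaction closes, renaming
# journal files to their undo counterparts:
#
#   after = aftertrans([(repo.svfs, b'journal', b'undo')])
#   after()   # renames 'journal' -> 'undo', tolerating a missing source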

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
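
# Example (follows directly from the code above):
#   undoname(b'.hg/journal.bookmarks') -> b'.hg/undo.bookmarks'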

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements
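
# Sketch (an assumption, based on stock configuration defaults of this era):
# a freshly created repository typically ends up with requirements like
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}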

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}
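
# Example (illustrative): an unrecognized key is passed straight through,
# which later makes createrepository() abort:
#
#   filterknowncreateopts(ui, {'backend': 'revlogv1', 'bogus': True})
#   -> {'bogus': True}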

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
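
# Illustrative usage (a sketch; the path is hypothetical):
#
#   createrepository(ui, b'/tmp/newrepo',
#                    createopts={'narrowfiles': True})
#   repo = instance(ui, b'/tmp/newrepo', create=False)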

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
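
# Behavior sketch (follows directly from the class above):
#   poisonrepository(repo)
#   repo.close()    # still permitted
#   repo.root       # raises error.ProgrammingError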