localrepo: capture repo interface factory functions as lambdas...
Gregory Szorc
r40030:6962ebc8 default
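
The motivation, as a minimal standalone sketch (the names ``makething``, ``FROZEN`` and ``LATE`` are illustrative, not from this change): a table holding a direct function reference freezes the binding at definition time, while a table holding ``lambda: makething`` re-resolves the module-level name when called, so a later rebinding of that name (which is what wrapping by an extension amounts to) is observed:

    def makething():
        return 'original'

    FROZEN = [makething]        # binds the current function object
    LATE = [lambda: makething]  # looks the name up again at call time

    def _wrapped():
        return 'wrapped'

    makething = _wrapped        # what extension wrapping amounts to

    assert FROZEN[0]() == 'original'  # the wrapper is not seen
    assert LATE[0]()() == 'wrapped'   # the wrapper is seen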
@@ -1,3000 +1,3002 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

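# Illustrative note (editorial addition, not part of localrepo.py): these
# descriptors are used as decorators on the repository class so a property
# is computed once and invalidated when its backing file changes on disk.
# A minimal sketch, assuming a repo class with the usual vfs attributes:
#
#     class somerepo(object):
#         @repofilecache('bookmarks')       # tracks .hg/bookmarks
#         def _bookmarks(self):
#             return bookmarks.bmstore(self)
#
# Reading repo._bookmarks parses the file once; a later write to
# .hg/bookmarks (changed stat data) makes the next access recompute.
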
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

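# Illustrative note (editorial addition, not part of localrepo.py): a
# hypothetical use of the decorator above, pinning a method to the
# unfiltered repo no matter which filtered view it is invoked through:
#
#     class somerepo(object):
#         @unfilteredmethod
#         def destroying(self):
#             ...  # here ``self`` is always repo.unfiltered()
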
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

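# Illustrative note (editorial addition, not part of localrepo.py): callers
# drive the executor through its context-manager protocol. Because the local
# peer runs commands synchronously, the future returned by callcommand() is
# already resolved. A sketch, assuming ``peer`` is a localpeer:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#     node = f.result()  # value (or exception) captured at call time
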
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
-        typ = fn(ui=ui,
-                 intents=intents,
-                 requirements=requirements,
-                 features=features,
-                 wdirvfs=wdirvfs,
-                 hgvfs=hgvfs,
-                 store=store,
-                 storevfs=storevfs,
-                 storeoptions=storevfs.options,
-                 cachevfs=cachevfs,
-                 extensionmodulenames=extensionmodulenames,
-                 extrastate=extrastate,
-                 baseclasses=bases)
+        typ = fn()(ui=ui,
+                   intents=intents,
+                   requirements=requirements,
+                   features=features,
+                   wdirvfs=wdirvfs,
+                   hgvfs=hgvfs,
+                   store=store,
+                   storevfs=storevfs,
+                   storeoptions=storevfs.options,
+                   cachevfs=cachevfs,
+                   extensionmodulenames=extensionmodulenames,
+                   extrastate=extrastate,
+                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

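# Illustrative note (editorial addition, not part of localrepo.py): the type
# composition performed above is plain ``type()`` with multiple bases. A
# minimal model with stand-in base classes:
#
#     class mainbehavior(object):
#         pass
#
#     class filestoragebehavior(object):
#         pass
#
#     cls = type('derivedrepo:/some/path<revlogv1,store>',
#                (mainbehavior, filestoragebehavior), {})
#
# Instances of ``cls`` inherit from both bases; the exotic class name is
# legal because type() does not validate identifier syntax.
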
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

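# Illustrative note (editorial addition, not part of localrepo.py): for
# example, a hypothetical requirement this Mercurial has never heard of
# aborts repository opening before any store access:
#
#     ensurerequirementsrecognized({b'fancy-future-format'},
#                                  gathersupportedrequirements(ui))
#     # -> RequirementError: repository requires features unknown to this
#     #    Mercurial: fancy-future-format
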
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

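# Illustrative note (editorial addition, not part of localrepo.py): the
# store class chosen by makestore() follows directly from the requirements:
#
#     'store' + 'fncache'  -> storemod.fncachestore ('dotencode' additionally
#                             toggles the dotencode path encoding)
#     'store' only         -> storemod.encodedstore
#     neither              -> storemod.basicstore (very old repositories)
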
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
-# derive the final type for a local repository instance.
+# derive the final type for a local repository instance. We capture the
+# function as a lambda so we don't hold a reference and the module-level
+# functions can be wrapped.
REPO_INTERFACES = [
-    (repository.ilocalrepositorymain, makemain),
-    (repository.ilocalrepositoryfilestorage, makefilestorage),
+    (repository.ilocalrepositorymain, lambda: makemain),
+    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

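# Illustrative note (editorial addition, not part of localrepo.py): the
# lambda indirection is what lets an extension wrap the module-level
# factories and still be seen by ``makelocalrepository()``, since ``fn()``
# re-resolves the name at repository construction time. A sketch of such an
# extension (``wrappedmakemain`` is a made-up name):
#
#     from mercurial import extensions, localrepo
#
#     def wrappedmakemain(orig, **kwargs):
#         cls = orig(**kwargs)
#         return cls  # or return a subclass adding behavior
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'makemain', wrappedmakemain)
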
812 @interfaceutil.implementer(repository.ilocalrepositorymain)
814 @interfaceutil.implementer(repository.ilocalrepositorymain)
813 class localrepository(object):
815 class localrepository(object):
814 """Main class for representing local repositories.
816 """Main class for representing local repositories.
815
817
816 All local repositories are instances of this class.
818 All local repositories are instances of this class.
817
819
818 Constructed on its own, instances of this class are not usable as
820 Constructed on its own, instances of this class are not usable as
819 repository objects. To obtain a usable repository object, call
821 repository objects. To obtain a usable repository object, call
820 ``hg.repository()``, ``localrepo.instance()``, or
822 ``hg.repository()``, ``localrepo.instance()``, or
821 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
823 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
822 ``instance()`` adds support for creating new repositories.
824 ``instance()`` adds support for creating new repositories.
823 ``hg.repository()`` adds more extension integration, including calling
825 ``hg.repository()`` adds more extension integration, including calling
824 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
826 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
825 used.
827 used.
826 """
828 """
827
829
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed, so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX the cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference
    # cycle: self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository.

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

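    # Illustrative sketch (not part of the upstream file): how the two view
    # methods above combine. ``repo`` is assumed to be an existing repository
    # object; 'visible' is one of the standard repoview filter names:
    #
    #   visible = repo.filtered('visible')   # hides hidden/obsolete csets
    #   raw = visible.unfiltered()           # back to the unfiltered repo
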
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
                return context.changectx(self, rev, node)
            elif changeid == 'null':
                node = nullid
                rev = nullrev
                return context.changectx(self, rev, node)
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
                return context.changectx(self, rev, node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
                return context.changectx(self, rev, node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                try:
                    node = bin(changeid)
                    rev = self.changelog.rev(node)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except IndexError:
            pass
        except error.WdirUnsupported:
            return context.workingctx(self)
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

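    # Illustrative sketch (not part of the upstream file): the lookup forms
    # accepted by ``__getitem__`` above. ``repo`` is assumed to be an
    # existing repository object:
    #
    #   repo[None]       # workingctx for the working directory
    #   repo[0]          # changectx by integer revision number
    #   repo['tip']      # symbolic names handled explicitly above
    #   repo['0' * 40]   # 40-character hex nodeid
    #   repo[0:2]        # list of changectxs, skipping filtered revisions
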
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

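    # Illustrative sketch (not part of the upstream file): querying revisions
    # through ``revs()`` above. ``repo`` is assumed to be an existing
    # repository object:
    #
    #   for rev in repo.revs('heads(all())'):
    #       print(rev)              # integer revision numbers
    #
    #   # %-formatting per ``revsetlang.formatspec``; %ld escapes a list of
    #   # revisions:
    #   revs = repo.revs('%ld and merge()', [5, 6, 7])
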
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

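    # Illustrative sketch (not part of the upstream file): how ``hook()``
    # above is used internally. This mirrors the real ``pretxnopen`` call in
    # ``transaction()`` further down; the ``txnid`` value is a placeholder:
    #
    #   repo.hook('pretxnopen', throw=True, txnname='push', txnid=txnid)
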
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing, we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records. The dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

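    # Illustrative sketch (not part of the upstream file): ``_loadfilter``
    # above reads pattern-to-command mappings from hgrc sections named after
    # the filter, i.e. ``[encode]`` and ``[decode]``. A minimal configuration,
    # adapted from the hgrc documentation (treat the exact commands as an
    # assumption):
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
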
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

1656 def transaction(self, desc, report=None):
1658 def transaction(self, desc, report=None):
1657 if (self.ui.configbool('devel', 'all-warnings')
1659 if (self.ui.configbool('devel', 'all-warnings')
1658 or self.ui.configbool('devel', 'check-locks')):
1660 or self.ui.configbool('devel', 'check-locks')):
1659 if self._currentlock(self._lockref) is None:
1661 if self._currentlock(self._lockref) is None:
1660 raise error.ProgrammingError('transaction requires locking')
1662 raise error.ProgrammingError('transaction requires locking')
1661 tr = self.currenttransaction()
1663 tr = self.currenttransaction()
1662 if tr is not None:
1664 if tr is not None:
1663 return tr.nest(name=desc)
1665 return tr.nest(name=desc)
1664
1666
1665 # abort here if the journal already exists
1667 # abort here if the journal already exists
1666 if self.svfs.exists("journal"):
1668 if self.svfs.exists("journal"):
1667 raise error.RepoError(
1669 raise error.RepoError(
1668 _("abandoned transaction found"),
1670 _("abandoned transaction found"),
1669 hint=_("run 'hg recover' to clean up transaction"))
1671 hint=_("run 'hg recover' to clean up transaction"))
1670
1672
1671 idbase = "%.40f#%f" % (random.random(), time.time())
1673 idbase = "%.40f#%f" % (random.random(), time.time())
1672 ha = hex(hashlib.sha1(idbase).digest())
1674 ha = hex(hashlib.sha1(idbase).digest())
1673 txnid = 'TXN:' + ha
1675 txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track their movement from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
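        #
        # For instance, retargeting tag "v1.0" from one node to another
        # would be recorded as two lines (node hashes below are purely
        # illustrative)::
        #
        #   -M 0123456789abcdef0123456789abcdef01234567 v1.0
        #   +M 89abcdef0123456789abcdef0123456789abcdef v1.0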
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing
                # the transaction if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

            repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
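        # undoname() swaps the 'journal' prefix for 'undo', so the list
        # above becomes e.g. (svfs, 'undo'), (vfs, 'undo.dirstate'), etc.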

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
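            # undo.desc stores the pre-transaction changelog length and
            # the transaction name on separate lines, e.g. "42\ncommit\n"
            # (see _writejournal); a third line, when present, carries
            # extra detail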
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
        delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

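    # A typical locked mutation looks like the sketch below (lock and
    # transaction objects are context managers; this is illustrative,
    # not a method of this class)::
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('my-change') as tr:
    #           ...  # mutate the store
    #
    # Taking 'wlock' before 'lock' avoids the dead-lock hazard described
    # in the docstrings above.
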
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
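                # at this point meta looks like, say,
                # {'copy': 'foo', 'copyrev': hex(crev)} -- the rename
                # source and the hex node it was copied from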
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
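        # Usage sketch (illustrative; commit() takes its own locks)::
        #
        #   node = repo.commit(text='fix a bug',
        #                      user='alice <alice@example.com>')
        #   # 'node' is the new changeset id, or None if there was
        #   # nothing to commit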
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already be stripped before the hook
            # is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # move the new commit into its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter any parent
                # changeset; if a parent has a higher phase, the result
                # will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

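    # A hedged sketch, not part of this file: an extension could register a
    # fixup callback before triggering a status run ("_cb" is hypothetical):
    #
    #     def _cb(wctx, status):
    #         # runs under wlock; read wctx.repo().dirstate, not a cached copy
    #         wctx.repo().ui.debug(b'%d modified\n' % len(status.modified))
    #
    #     repo.addpostdsstatus(_cb)
    #     repo[None].status()  # _cb fires when dirstate fixups are written
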
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

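    # A hedged illustration, not part of this file: between() samples the
    # first-parent chain from each ``top`` down towards ``bottom`` at
    # exponentially growing distances (1, 2, 4, 8, ...), so for a chain of
    # twenty linear changesets it would report the nodes 1, 2, 4, 8 and 16
    # steps below ``top``. The legacy wire protocol appears to use this
    # sparse sample to narrow down common ancestors without transferring
    # the whole chain.
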
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose registered functions are called
        with a pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

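    # A hedged sketch, not part of this file: extensions (largefiles, for
    # instance) register pre-push checks from reposetup(); the names below
    # are hypothetical:
    #
    #     def checkoutgoing(pushop):
    #         if len(pushop.outgoing.missing) > 100:
    #             raise error.Abort(b'refusing to push over 100 changesets')
    #
    #     def reposetup(ui, repo):
    #         repo.prepushoutgoinghooks.add(b'myext', checkoutgoing)
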
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

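    # A hedged usage sketch, not part of this file: bookmarks travel over the
    # generic pushkey protocol, so both sides of it can be exercised locally
    # ("oldhex"/"newhex" are hypothetical hex node strings):
    #
    #     repo.listkeys(b'namespaces')    # advertised namespaces
    #     repo.listkeys(b'bookmarks')     # {name: hex node}
    #     repo.pushkey(b'bookmarks', b'stable', oldhex, newhex)  # -> bool
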
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

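    # A hedged usage sketch, not part of this file: failed commits save the
    # message through this method so it can be recovered with
    # ``hg commit --logfile .hg/last-message.txt``:
    #
    #     path = repo.savecommitmessage(b'draft message\n')
    #     # ``path`` is reported relative to the current directory
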
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

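# For example, undoname(b'journal.dirstate') returns b'undo.dirstate': only
# the leading 'journal' prefix is rewritten, which is how rollback derives
# undo file names from transaction journal names.
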
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

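# A hedged sketch, not part of this file: an extension adding its own
# requirement to new repositories by wrapping newreporequirements() from its
# uisetup(); 'exp-myext' is a hypothetical requirement name:
#
#     from mercurial import extensions, localrepo
#
#     def wrapper(orig, ui, createopts=None):
#         requirements = orig(ui, createopts=createopts)
#         requirements.add('exp-myext')
#         return requirements
#
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapper)
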
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}

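# A hedged sketch, not part of this file: an extension that understands a
# custom creation option removes it from the unknown set by wrapping this
# function ('myextopt' is a hypothetical option name):
#
#     def wrapper(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop('myextopt', None)
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts', wrapper)
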
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

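# A hedged usage sketch, not part of this file: most callers reach
# createrepository() through instance() above or through hg.repository():
#
#     from mercurial import hg, ui as uimod
#
#     repo = hg.repository(uimod.ui.load(), b'/tmp/newrepo', create=True)
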
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
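
# A hedged usage sketch, not part of this file: after an unshare, stale
# references to the old repo object fail loudly instead of silently using
# the wrong storage:
#
#     poisonrepository(repo)
#     repo.close()        # still permitted
#     repo.changelog      # raises error.ProgrammingError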