localrepo: add missing join()...
Gregory Szorc
r39882:d3e761f9 default
@@ -1,2850 +1,2850 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

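# Illustrative sketch (assumed usage, not part of this changeset):
# ``unfilteredmethod`` decorates methods that must see every revision,
# including ones hidden by repoview filtering, e.g.:
#
#     class localrepository(object):
#         @unfilteredmethod
#         def destroyed(self):
#             # ``self`` here is guaranteed to be the unfiltered repo,
#             # so caches on the unfiltered view can be invalidated safely.
#             ...
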
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

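# Illustrative sketch (assumed usage, not part of this changeset): the
# executor mirrors the command-batching API used for remote peers, except
# that every future is already resolved when callcommand() returns:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('lookup', {'key': b'tip'})
#     node = f.result()
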
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

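# Illustrative sketch (assumed extension code, not part of this changeset):
# an extension advertises support for a custom requirement by registering
# a featuresetup function from its uisetup():
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myfeature')  # hypothetical requirement name
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
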
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn(ui=ui,
                 intents=intents,
                 requirements=requirements,
                 wdirvfs=wdirvfs,
                 hgvfs=hgvfs,
                 store=store,
                 storevfs=storevfs,
                 storeoptions=storevfs.options,
                 cachevfs=cachevfs,
                 extensionmodulenames=extensionmodulenames,
                 extrastate=extrastate,
                 baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        intents=intents)

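# Illustrative sketch (not part of this changeset): for a repository with
# default requirements, the loop above effectively computes something like:
#
#     bases = [localrepository, revlogfilestorage]
#     cls = type('derivedrepo:/path/to/repo<dotencode,fncache,...>',
#                tuple(bases), {})
#     repo = cls(baseui=baseui, ui=ui, ...)
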
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

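# Illustrative sketch (hypothetical requirement name, not part of this
# changeset): opening a repository whose .hg/requires lists a feature this
# Mercurial doesn't know fails with a hint:
#
#     ensurerequirementsrecognized({b'exp-unknown-feature'}, supported)
#     # -> RequirementError: repository requires features unknown to this
#     #    Mercurial: exp-unknown-feature
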
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

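# Illustrative sketch (not part of this changeset): the requirement-to-store
# mapping above resolves as follows:
#
#     makestore({b'store', b'fncache', b'dotencode'}, path, vfstype)
#     # -> fncachestore with dotencode enabled (modern default)
#     makestore({b'store'}, path, vfstype)
#     # -> encodedstore
#     makestore(set(), path, vfstype)
#     # -> basicstore (very old repositories)
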
def resolvestorevfsoptions(ui, requirements):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements))

    return options

def resolverevlogstorevfsoptions(ui, requirements):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

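# Illustrative sketch (approximate, default config assumed, not part of this
# changeset): for a typical modern repository with revlogv1 + generaldelta
# requirements, the resolved options look roughly like:
#
#     {b'revlogv1': True,
#      b'generaldelta': True,
#      b'lazydeltabase': True,   # format.generaldelta is off by default
#      b'with-sparse-read': False,
#      b'sparse-revlog': False,
#      ...}
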
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, makemain),
    (repository.ilocalrepositoryfilestorage, makefilestorage),
]

774 @interfaceutil.implementer(repository.ilocalrepositorymain)
774 @interfaceutil.implementer(repository.ilocalrepositorymain)
775 class localrepository(object):
775 class localrepository(object):
776 """Main class for representing local repositories.
776 """Main class for representing local repositories.
777
777
778 All local repositories are instances of this class.
778 All local repositories are instances of this class.
779
779
780 Constructed on its own, instances of this class are not usable as
780 Constructed on its own, instances of this class are not usable as
781 repository objects. To obtain a usable repository object, call
781 repository objects. To obtain a usable repository object, call
782 ``hg.repository()``, ``localrepo.instance()``, or
782 ``hg.repository()``, ``localrepo.instance()``, or
783 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
783 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
784 ``instance()`` adds support for creating new repositories.
784 ``instance()`` adds support for creating new repositories.
785 ``hg.repository()`` adds more extension integration, including calling
785 ``hg.repository()`` adds more extension integration, including calling
786 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
786 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
787 used.
787 used.
788 """
788 """
789
789
790 # obsolete experimental requirements:
790 # obsolete experimental requirements:
791 # - manifestv2: An experimental new manifest format that allowed
791 # - manifestv2: An experimental new manifest format that allowed
792 # for stem compression of long paths. Experiment ended up not
792 # for stem compression of long paths. Experiment ended up not
793 # being successful (repository sizes went up due to worse delta
793 # being successful (repository sizes went up due to worse delta
794 # chains), and the code was deleted in 4.6.
794 # chains), and the code was deleted in 4.6.
795 supportedformats = {
795 supportedformats = {
796 'revlogv1',
796 'revlogv1',
797 'generaldelta',
797 'generaldelta',
798 'treemanifest',
798 'treemanifest',
799 REVLOGV2_REQUIREMENT,
799 REVLOGV2_REQUIREMENT,
800 SPARSEREVLOG_REQUIREMENT,
800 SPARSEREVLOG_REQUIREMENT,
801 }
801 }
802 _basesupported = supportedformats | {
802 _basesupported = supportedformats | {
803 'store',
803 'store',
804 'fncache',
804 'fncache',
805 'shared',
805 'shared',
806 'relshared',
806 'relshared',
807 'dotencode',
807 'dotencode',
808 'exp-sparse',
808 'exp-sparse',
809 'internal-phase'
809 'internal-phase'
810 }
810 }
811
811
812 # list of prefix for file which can be written without 'wlock'
812 # list of prefix for file which can be written without 'wlock'
813 # Extensions should extend this list when needed
813 # Extensions should extend this list when needed
814 _wlockfreeprefix = {
814 _wlockfreeprefix = {
815 # We migh consider requiring 'wlock' for the next
815 # We migh consider requiring 'wlock' for the next
816 # two, but pretty much all the existing code assume
816 # two, but pretty much all the existing code assume
817 # wlock is not needed so we keep them excluded for
817 # wlock is not needed so we keep them excluded for
818 # now.
818 # now.
819 'hgrc',
819 'hgrc',
820 'requires',
820 'requires',
821 # XXX cache is a complicatged business someone
821 # XXX cache is a complicatged business someone
822 # should investigate this in depth at some point
822 # should investigate this in depth at some point
823 'cache/',
823 'cache/',
824 # XXX shouldn't be dirstate covered by the wlock?
824 # XXX shouldn't be dirstate covered by the wlock?
825 'dirstate',
825 'dirstate',
826 # XXX bisect was still a bit too messy at the time
826 # XXX bisect was still a bit too messy at the time
827 # this changeset was introduced. Someone should fix
827 # this changeset was introduced. Someone should fix
828 # the remainig bit and drop this line
828 # the remainig bit and drop this line
829 'bisect.state',
829 'bisect.state',
830 }
830 }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
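
    # A minimal sketch of how the wards above get wired up (this mirrors the
    # __init__ logic; only names from this class are used):
    #
    #   origaudit = repo.svfs.audit
    #   repo.svfs.audit = repo._getsvfsward(origaudit)
    #
    # After that, a write-mode access such as repo.svfs('somefile', 'w') first
    # runs the original audit, then emits a develwarn when
    # repo._currentlock(repo._lockref) is None.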

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
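
    # For illustration, with a subrepository registered at 'sub' in the
    # working context and a POSIX path layout:
    #
    #   repo._checknested(repo.root + '/sub')      -> True
    #   repo._checknested(repo.root + '/sub/x/y')  -> delegated to the
    #                                                 subrepo's checknested()
    #   repo._checknested(repo.root + '/other')    -> False (not a subrepo)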

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)
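
    # For illustration: an extension can swap in its own dirstate class by
    # wrapping this extension point. A minimal sketch; the 'mydirstate'
    # wrapper class is hypothetical:
    #
    #   def wrapdirstate(orig, self):
    #       return mydirstate(orig(self))
    #
    #   extensions.wrapfunction(localrepo.localrepository, '_makedirstate',
    #                           wrapdirstate)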

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
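
    # For illustration, the lookup styles accepted above are:
    #
    #   repo[None]   -> workingctx for the working directory
    #   repo[0]      -> changectx for revision 0
    #   repo[node]   -> changectx for a binary changeset node
    #   repo[0:3]    -> list of changectx, skipping filtered revisions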

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
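
    # For illustration, %-formatting lets callers pass typed values without
    # hand-escaping them (variable names below are placeholders; see
    # revsetlang.formatspec for the full list of specifiers):
    #
    #   repo.revs('heads(%ld)', revlist)     # %ld: list of integer revisions
    #   repo.revs('ancestors(%n)', binnode)  # %n: binary node
    #   repo.revs('branch(%s)', branchname)  # %s: string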

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
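
    # For illustration (the alias name is hypothetical):
    #
    #   repo.anyrevs(['mytip'], user=True, localalias={'mytip': 'tip'})
    #
    # resolves 'mytip' through the local definition even if the user config
    # defines an alias of the same name.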

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
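
    # For illustration: the mapping returned above goes from tag name (bytes,
    # in local encoding) to binary node, and 'tip' is expected to always be
    # present:
    #
    #   tags = repo.tags()
    #   assert tags['tip'] == repo.changelog.tip()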

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
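
    # For illustration, filters come from pattern/command pairs in hgrc
    # sections (a hypothetical pair of line-ending filters):
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos
    #
    # _loadfilter('encode') compiles such a section into (matcher, fn, params)
    # triples, and _filter() pipes the data through the first matching entry.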

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
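
    # For illustration of the flag handling above (filenames and data are
    # placeholders):
    #
    #   repo.wwrite('script.sh', data, 'x')   # regular file with exec bit
    #   repo.wwrite('link', 'target', 'l')    # symlink pointing at 'target'
    #   repo.wwrite('plain.txt', data, '')    # plain, non-executable file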

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
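
    # For illustration: callers typically hold the store lock and use the
    # transaction as a context manager, with transaction() below nesting into
    # any transaction that is already running:
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # writes covered by tr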
1555
1555
1556 def transaction(self, desc, report=None):
1556 def transaction(self, desc, report=None):
1557 if (self.ui.configbool('devel', 'all-warnings')
1557 if (self.ui.configbool('devel', 'all-warnings')
1558 or self.ui.configbool('devel', 'check-locks')):
1558 or self.ui.configbool('devel', 'check-locks')):
1559 if self._currentlock(self._lockref) is None:
1559 if self._currentlock(self._lockref) is None:
1560 raise error.ProgrammingError('transaction requires locking')
1560 raise error.ProgrammingError('transaction requires locking')
1561 tr = self.currenttransaction()
1561 tr = self.currenttransaction()
1562 if tr is not None:
1562 if tr is not None:
1563 return tr.nest(name=desc)
1563 return tr.nest(name=desc)
1564
1564
1565 # abort here if the journal already exists
1565 # abort here if the journal already exists
1566 if self.svfs.exists("journal"):
1566 if self.svfs.exists("journal"):
1567 raise error.RepoError(
1567 raise error.RepoError(
1568 _("abandoned transaction found"),
1568 _("abandoned transaction found"),
1569 hint=_("run 'hg recover' to clean up transaction"))
1569 hint=_("run 'hg recover' to clean up transaction"))
1570
1570
1571 idbase = "%.40f#%f" % (random.random(), time.time())
1571 idbase = "%.40f#%f" % (random.random(), time.time())
1572 ha = hex(hashlib.sha1(idbase).digest())
1572 ha = hex(hashlib.sha1(idbase).digest())
1573 txnid = 'TXN:' + ha
1573 txnid = 'TXN:' + ha
1574 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1574 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1575
1575
1576 self._writejournal(desc)
1576 self._writejournal(desc)
1577 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1577 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1578 if report:
1578 if report:
1579 rp = report
1579 rp = report
1580 else:
1580 else:
1581 rp = self.ui.warn
1581 rp = self.ui.warn
1582 vfsmap = {'plain': self.vfs} # root of .hg/
1582 vfsmap = {'plain': self.vfs} # root of .hg/
1583 # we must avoid cyclic reference between repo and transaction.
1583 # we must avoid cyclic reference between repo and transaction.
1584 reporef = weakref.ref(self)
1584 reporef = weakref.ref(self)
1585 # Code to track tag movement
1585 # Code to track tag movement
1586 #
1586 #
1587 # Since tags are all handled as file content, it is actually quite hard
1587 # Since tags are all handled as file content, it is actually quite hard
1588 # to track these movement from a code perspective. So we fallback to a
1588 # to track these movement from a code perspective. So we fallback to a
1589 # tracking at the repository level. One could envision to track changes
1589 # tracking at the repository level. One could envision to track changes
1590 # to the '.hgtags' file through changegroup apply but that fails to
1590 # to the '.hgtags' file through changegroup apply but that fails to
1591 # cope with case where transaction expose new heads without changegroup
1591 # cope with case where transaction expose new heads without changegroup
1592 # being involved (eg: phase movement).
1592 # being involved (eg: phase movement).
1593 #
1593 #
1594 # For now, We gate the feature behind a flag since this likely comes
1594 # For now, We gate the feature behind a flag since this likely comes
1595 # with performance impacts. The current code run more often than needed
1595 # with performance impacts. The current code run more often than needed
1596 # and do not use caches as much as it could. The current focus is on
1596 # and do not use caches as much as it could. The current focus is on
1597 # the behavior of the feature so we disable it by default. The flag
1597 # the behavior of the feature so we disable it by default. The flag
1598 # will be removed when we are happy with the performance impact.
1598 # will be removed when we are happy with the performance impact.
1599 #
1599 #
1600 # Once this feature is no longer experimental move the following
1600 # Once this feature is no longer experimental move the following
1601 # documentation to the appropriate help section:
1601 # documentation to the appropriate help section:
1602 #
1602 #
1603 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1603 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1604 # tags (new or changed or deleted tags). In addition the details of
1604 # tags (new or changed or deleted tags). In addition the details of
1605 # these changes are made available in a file at:
1605 # these changes are made available in a file at:
1606 # ``REPOROOT/.hg/changes/tags.changes``.
1606 # ``REPOROOT/.hg/changes/tags.changes``.
1607 # Make sure you check for HG_TAG_MOVED before reading that file as it
1607 # Make sure you check for HG_TAG_MOVED before reading that file as it
1608 # might exist from a previous transaction even if no tag were touched
1608 # might exist from a previous transaction even if no tag were touched
1609 # in this one. Changes are recorded in a line base format::
1609 # in this one. Changes are recorded in a line base format::
1610 #
1610 #
1611 # <action> <hex-node> <tag-name>\n
1611 # <action> <hex-node> <tag-name>\n
1612 #
1612 #
1613 # Actions are defined as follow:
1613 # Actions are defined as follow:
1614 # "-R": tag is removed,
1614 # "-R": tag is removed,
1615 # "+A": tag is added,
1615 # "+A": tag is added,
1616 # "-M": tag is moved (old value),
1616 # "-M": tag is moved (old value),
1617 # "+M": tag is moved (new value),
1617 # "+M": tag is moved (new value),
1618 tracktags = lambda x: None
1618 tracktags = lambda x: None
1619 # experimental config: experimental.hook-track-tags
1619 # experimental config: experimental.hook-track-tags
1620 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1620 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1621 if desc != 'strip' and shouldtracktags:
1621 if desc != 'strip' and shouldtracktags:
1622 oldheads = self.changelog.headrevs()
1622 oldheads = self.changelog.headrevs()
1623 def tracktags(tr2):
1623 def tracktags(tr2):
1624 repo = reporef()
1624 repo = reporef()
1625 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1625 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1626 newheads = repo.changelog.headrevs()
1626 newheads = repo.changelog.headrevs()
1627 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1627 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1628 # notes: we compare lists here.
1628 # notes: we compare lists here.
1629 # As we do it only once buiding set would not be cheaper
1629 # As we do it only once buiding set would not be cheaper
1630 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1630 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1631 if changes:
1631 if changes:
1632 tr2.hookargs['tag_moved'] = '1'
1632 tr2.hookargs['tag_moved'] = '1'
1633 with repo.vfs('changes/tags.changes', 'w',
1633 with repo.vfs('changes/tags.changes', 'w',
1634 atomictemp=True) as changesfile:
1634 atomictemp=True) as changesfile:
1635 # note: we do not register the file to the transaction
1635 # note: we do not register the file to the transaction
1636 # because we needs it to still exist on the transaction
1636 # because we needs it to still exist on the transaction
1637 # is close (for txnclose hooks)
1637 # is close (for txnclose hooks)
1638 tagsmod.writediff(changesfile, changes)
1638 tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
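
        # For illustration, a shell-hook counterpart to the
        # 'pretxnclose-bookmark' hook fired above (the hook name is real;
        # the command is a hypothetical example). External hooks receive
        # the hook args prepared by bookmarks.preparehookargs() as HG_*
        # environment variables:
        #
        #   [hooks]
        #   pretxnclose-bookmark.audit = echo "bookmark $HG_BOOKMARK moved"
        #
        # Because the hook is run with throw=True, a non-zero exit status
        # aborts the whole transaction.
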
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing the
                # transaction if tr.addfilegenerator (via dirstate.write
                # or so) wasn't invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
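
    # A minimal usage sketch for the transaction machinery above, assuming
    # the caller holds the store lock as required for any write operation:
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # mutate the store; an escaping exception aborts tr
    #
    # Transaction objects are context managers: they close on normal exit
    # and abort (rolling back via the journal) when an exception escapes.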

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
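
    # undoname() rewrites the 'journal' prefix, so the pairs above become
    # e.g. (svfs, 'undo'), (vfs, 'undo.dirstate'), (vfs, 'undo.branch'),
    # and so on -- the files that rollback() later consults.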

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
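
        # For example, committing in a repository with 42 revisions would
        # leave a 'journal.desc' (later renamed to 'undo.desc') containing:
        #
        #   42
        #   commit
        #
        # i.e. the pre-transaction changelog length followed by the
        # transaction description, which _rollback() parses.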

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
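
    # 'hg recover' is the user-facing entry point for this method; it only
    # replays the journal of an interrupted transaction and is a no-op
    # (returning False) when no journal exists.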

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
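
    # 'hg rollback' drives the two methods above ('--dry-run' maps to
    # dryrun=True, '-f' to force=True); only the single most recent
    # transaction recorded in the undo files can be rolled back this way.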

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

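    # The 'full' path above is what 'hg debugupdatecaches' exercises; a
    # sketch of the equivalent call from an extension or debugging code:
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
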
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

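    # A minimal usage sketch for _afterlock() (the callback name is
    # illustrative): work is deferred until the outermost lock is released,
    # or runs immediately when the repository is not locked at all.
    #
    #   def _notify():
    #       ...  # e.g. fire a hook that must observe the released locks
    #   repo._afterlock(_notify)
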
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
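
    # The documented ordering as a sketch: acquire wlock before lock and
    # release in reverse. Locks are context managers, so this nests
    # naturally:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('example') as tr:
    #               ...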

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
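
    # Illustration of the copy metadata recorded above: committing 'bar',
    # renamed from 'foo', stores roughly
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-digit hex filelog node>'}
    #
    # in the filelog entry, with fparent1 set to nullid so that readers
    # know to look up the copy source.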

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
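
    # A minimal programmatic commit, as a sketch (message and user are
    # illustrative values):
    #
    #   node = repo.commit(text='example commit',
    #                      user='alice <alice@example.com>')
    #   if node is None:
    #       ...  # nothing changed and empty commits were not allowed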

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

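    # A minimal usage sketch for addpostdsstatus() (the callback name and
    # body are hypothetical; real fixup logic would live in an extension):
    #
    #   def fixup(wctx, status):
    #       # runs with the wlock held, after dirstate status fixups
    #       pass
    #   repo.addpostdsstatus(fixup)
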
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

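    # A usage sketch for branchheads() above (hypothetical two-headed
    # 'default' branch; nodes come back newest first as binary strings):
    #
    #   for h in repo.branchheads(b'default', closed=True):
    #       repo.ui.write(b'%s\n' % short(h))
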
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk backwards through first parents until a merge changeset
            # or the root is reached, recording the linear run that starts
            # at the requested node
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk down the first-parent chain from top towards bottom,
            # sampling nodes at exponentially growing distances (1, 2, 4...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

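    # A worked sketch of between() on a hypothetical linear history with
    # revisions 0..10, querying (top=rev 10, bottom=rev 0): nodes are
    # sampled at distances 1, 2, 4 and 8 from top, so the list returned
    # for that pair holds the nodes of revs [9, 8, 6, 2].
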
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (providing repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

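    # A sketch of the pushkey() flow for a bookmark (name and node are
    # hypothetical): 'prepushkey' may veto, the value is then stored, and
    # the 'pushkey' hook runs once the lock is released:
    #
    #   repo.pushkey(b'bookmarks', b'mybook', b'', hex(newnode))
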
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

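    # For example (hypothetical bookmark), listing the 'bookmarks' namespace
    # is expected to return a dict mapping names to hex-encoded nodes:
    #
    #   repo.listkeys(b'bookmarks')  # -> {b'mybook': b'0123abcd...'}
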
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

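# A usage sketch for instance() (assuming ``ui`` already exists and the
# target path holds no repository yet):
#
#   repo = instance(ui, b'/path/to/repo', create=True)
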
def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

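# With stock defaults (usestore, usefncache, dotencode and generaldelta all
# enabled, zlib compression, no experimental features), the returned set is
# expected to be:
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
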
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

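# A sketch of an extension claiming a hypothetical 'myopt' creation option
# by wrapping this function with extensions.wrapfunction:
#
#   def _filtercreateopts(orig, ui, createopts):
#       createopts = {k: v for k, v in createopts.items() if k != 'myopt'}
#       return orig(ui, createopts)
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filtercreateopts)
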
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)