compression: only declare revlog support for available engine...
marmoute
r42304:b970fece default
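
This commit gates the derived ``exp-compression-*`` requirement names on the
engine actually being usable, not merely registered:
``gathersupportedrequirements()`` now checks ``engine.available()`` before
``engine.revlogheader()``. A minimal sketch of the effect, assuming a
stand-in registry (``FakeEngine`` and the sample engines below are
hypothetical, not Mercurial's API):

    # Illustrative sketch only: 'FakeEngine' and the sample registry are
    # hypothetical stand-ins for util.compengines, not Mercurial's API.
    class FakeEngine(object):
        def __init__(self, loadable, header):
            self._loadable = loadable
            self._header = header

        def available(self):
            # e.g. a zstd engine would report False when the backing
            # module is not installed
            return self._loadable

        def revlogheader(self):
            # a non-empty header marks the engine as revlog-capable
            return self._header

    engines = {
        b'zlib': FakeEngine(True, b'x'),
        b'zstd': FakeEngine(False, b'\x28'),  # assume module is missing
        b'none': FakeEngine(True, None),      # not usable for revlogs
    }

    supported = set()
    for name, engine in engines.items():
        # Before this change only revlogheader() was consulted, so the
        # unavailable zstd engine was still advertised as supported.
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    assert supported == {b'exp-compression-zlib'}

Under the old check, the unavailable ``zstd`` stand-in would still have been
advertised, and a repository requiring it would presumably fail only later,
when a revlog actually tried to use the engine.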
@@ -1,3117 +1,3117 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always needs to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
-        if engine.revlogheader():
+        if engine.available() and engine.revlogheader():
647 supported.add(b'exp-compression-%s' % name)
647 supported.add(b'exp-compression-%s' % name)
648
648
649 return supported
649 return supported
650
650
651 def ensurerequirementsrecognized(requirements, supported):
651 def ensurerequirementsrecognized(requirements, supported):
652 """Validate that a set of local requirements is recognized.
652 """Validate that a set of local requirements is recognized.
653
653
654 Receives a set of requirements. Raises an ``error.RepoError`` if there
654 Receives a set of requirements. Raises an ``error.RepoError`` if there
655 exists any requirement in that set that currently loaded code doesn't
655 exists any requirement in that set that currently loaded code doesn't
656 recognize.
656 recognize.
657
657
658 Returns a set of supported requirements.
658 Returns a set of supported requirements.
659 """
659 """
660 missing = set()
660 missing = set()
661
661
662 for requirement in requirements:
662 for requirement in requirements:
663 if requirement in supported:
663 if requirement in supported:
664 continue
664 continue
665
665
666 if not requirement or not requirement[0:1].isalnum():
666 if not requirement or not requirement[0:1].isalnum():
667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
668
668
669 missing.add(requirement)
669 missing.add(requirement)
670
670
671 if missing:
671 if missing:
672 raise error.RequirementError(
672 raise error.RequirementError(
673 _(b'repository requires features unknown to this Mercurial: %s') %
673 _(b'repository requires features unknown to this Mercurial: %s') %
674 b' '.join(sorted(missing)),
674 b' '.join(sorted(missing)),
675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
676 b'for more information'))
676 b'for more information'))
677
677
678 def ensurerequirementscompatible(ui, requirements):
678 def ensurerequirementscompatible(ui, requirements):
679 """Validates that a set of recognized requirements is mutually compatible.
679 """Validates that a set of recognized requirements is mutually compatible.
680
680
681 Some requirements may not be compatible with others or require
681 Some requirements may not be compatible with others or require
682 config options that aren't enabled. This function is called during
682 config options that aren't enabled. This function is called during
683 repository opening to ensure that the set of requirements needed
683 repository opening to ensure that the set of requirements needed
684 to open a repository is sane and compatible with config options.
684 to open a repository is sane and compatible with config options.
685
685
686 Extensions can monkeypatch this function to perform additional
686 Extensions can monkeypatch this function to perform additional
687 checking.
687 checking.
688
688
689 ``error.RepoError`` should be raised on failure.
689 ``error.RepoError`` should be raised on failure.
690 """
690 """
691 if b'exp-sparse' in requirements and not sparse.enabled:
691 if b'exp-sparse' in requirements and not sparse.enabled:
692 raise error.RepoError(_(b'repository is using sparse feature but '
692 raise error.RepoError(_(b'repository is using sparse feature but '
693 b'sparse is not enabled; enable the '
693 b'sparse is not enabled; enable the '
694 b'"sparse" extensions to access'))
694 b'"sparse" extensions to access'))
695
695
696 def makestore(requirements, path, vfstype):
696 def makestore(requirements, path, vfstype):
697 """Construct a storage object for a repository."""
697 """Construct a storage object for a repository."""
698 if b'store' in requirements:
698 if b'store' in requirements:
699 if b'fncache' in requirements:
699 if b'fncache' in requirements:
700 return storemod.fncachestore(path, vfstype,
700 return storemod.fncachestore(path, vfstype,
701 b'dotencode' in requirements)
701 b'dotencode' in requirements)
702
702
703 return storemod.encodedstore(path, vfstype)
703 return storemod.encodedstore(path, vfstype)
704
704
705 return storemod.basicstore(path, vfstype)
705 return storemod.basicstore(path, vfstype)
706
706
707 def resolvestorevfsoptions(ui, requirements, features):
707 def resolvestorevfsoptions(ui, requirements, features):
708 """Resolve the options to pass to the store vfs opener.
708 """Resolve the options to pass to the store vfs opener.
709
709
710 The returned dict is used to influence behavior of the storage layer.
710 The returned dict is used to influence behavior of the storage layer.
711 """
711 """
712 options = {}
712 options = {}
713
713
714 if b'treemanifest' in requirements:
714 if b'treemanifest' in requirements:
715 options[b'treemanifest'] = True
715 options[b'treemanifest'] = True
716
716
717 # experimental config: format.manifestcachesize
717 # experimental config: format.manifestcachesize
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
719 if manifestcachesize is not None:
719 if manifestcachesize is not None:
720 options[b'manifestcachesize'] = manifestcachesize
720 options[b'manifestcachesize'] = manifestcachesize
721
721
722 # In the absence of another requirement superseding a revlog-related
722 # In the absence of another requirement superseding a revlog-related
723 # requirement, we have to assume the repo is using revlog version 0.
723 # requirement, we have to assume the repo is using revlog version 0.
724 # This revlog format is super old and we don't bother trying to parse
724 # This revlog format is super old and we don't bother trying to parse
725 # opener options for it because those options wouldn't do anything
725 # opener options for it because those options wouldn't do anything
726 # meaningful on such old repos.
726 # meaningful on such old repos.
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
729
729
730 return options
730 return options
731
731
732 def resolverevlogstorevfsoptions(ui, requirements, features):
732 def resolverevlogstorevfsoptions(ui, requirements, features):
733 """Resolve opener options specific to revlogs."""
733 """Resolve opener options specific to revlogs."""
734
734
735 options = {}
735 options = {}
736 options[b'flagprocessors'] = {}
736 options[b'flagprocessors'] = {}
737
737
738 if b'revlogv1' in requirements:
738 if b'revlogv1' in requirements:
739 options[b'revlogv1'] = True
739 options[b'revlogv1'] = True
740 if REVLOGV2_REQUIREMENT in requirements:
740 if REVLOGV2_REQUIREMENT in requirements:
741 options[b'revlogv2'] = True
741 options[b'revlogv2'] = True
742
742
743 if b'generaldelta' in requirements:
743 if b'generaldelta' in requirements:
744 options[b'generaldelta'] = True
744 options[b'generaldelta'] = True
745
745
746 # experimental config: format.chunkcachesize
746 # experimental config: format.chunkcachesize
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
748 if chunkcachesize is not None:
748 if chunkcachesize is not None:
749 options[b'chunkcachesize'] = chunkcachesize
749 options[b'chunkcachesize'] = chunkcachesize
750
750
751 deltabothparents = ui.configbool(b'storage',
751 deltabothparents = ui.configbool(b'storage',
752 b'revlog.optimize-delta-parent-choice')
752 b'revlog.optimize-delta-parent-choice')
753 options[b'deltabothparents'] = deltabothparents
753 options[b'deltabothparents'] = deltabothparents
754
754
755 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
755 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
756 lazydeltabase = False
756 lazydeltabase = False
757 if lazydelta:
757 if lazydelta:
758 lazydeltabase = ui.configbool(b'storage',
758 lazydeltabase = ui.configbool(b'storage',
759 b'revlog.reuse-external-delta-parent')
759 b'revlog.reuse-external-delta-parent')
760 if lazydeltabase is None:
760 if lazydeltabase is None:
761 lazydeltabase = not scmutil.gddeltaconfig(ui)
761 lazydeltabase = not scmutil.gddeltaconfig(ui)
762 options[b'lazydelta'] = lazydelta
762 options[b'lazydelta'] = lazydelta
763 options[b'lazydeltabase'] = lazydeltabase
763 options[b'lazydeltabase'] = lazydeltabase
764
764
765 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
765 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
766 if 0 <= chainspan:
766 if 0 <= chainspan:
767 options[b'maxdeltachainspan'] = chainspan
767 options[b'maxdeltachainspan'] = chainspan
768
768
769 mmapindexthreshold = ui.configbytes(b'experimental',
769 mmapindexthreshold = ui.configbytes(b'experimental',
770 b'mmapindexthreshold')
770 b'mmapindexthreshold')
771 if mmapindexthreshold is not None:
771 if mmapindexthreshold is not None:
772 options[b'mmapindexthreshold'] = mmapindexthreshold
772 options[b'mmapindexthreshold'] = mmapindexthreshold
773
773
774 withsparseread = ui.configbool(b'experimental', b'sparse-read')
774 withsparseread = ui.configbool(b'experimental', b'sparse-read')
775 srdensitythres = float(ui.config(b'experimental',
775 srdensitythres = float(ui.config(b'experimental',
776 b'sparse-read.density-threshold'))
776 b'sparse-read.density-threshold'))
777 srmingapsize = ui.configbytes(b'experimental',
777 srmingapsize = ui.configbytes(b'experimental',
778 b'sparse-read.min-gap-size')
778 b'sparse-read.min-gap-size')
779 options[b'with-sparse-read'] = withsparseread
779 options[b'with-sparse-read'] = withsparseread
780 options[b'sparse-read-density-threshold'] = srdensitythres
780 options[b'sparse-read-density-threshold'] = srdensitythres
781 options[b'sparse-read-min-gap-size'] = srmingapsize
781 options[b'sparse-read-min-gap-size'] = srmingapsize
782
782
783 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
783 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
784 options[b'sparse-revlog'] = sparserevlog
784 options[b'sparse-revlog'] = sparserevlog
785 if sparserevlog:
785 if sparserevlog:
786 options[b'generaldelta'] = True
786 options[b'generaldelta'] = True
787
787
788 maxchainlen = None
788 maxchainlen = None
789 if sparserevlog:
789 if sparserevlog:
790 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
790 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
791 # experimental config: format.maxchainlen
791 # experimental config: format.maxchainlen
792 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
792 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
793 if maxchainlen is not None:
793 if maxchainlen is not None:
794 options[b'maxchainlen'] = maxchainlen
794 options[b'maxchainlen'] = maxchainlen
795
795
796 for r in requirements:
796 for r in requirements:
797 if r.startswith(b'exp-compression-'):
797 if r.startswith(b'exp-compression-'):
798 options[b'compengine'] = r[len(b'exp-compression-'):]
798 options[b'compengine'] = r[len(b'exp-compression-'):]
799
799
800 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
800 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
801 if options[b'zlib.level'] is not None:
801 if options[b'zlib.level'] is not None:
802 if not (0 <= options[b'zlib.level'] <= 9):
802 if not (0 <= options[b'zlib.level'] <= 9):
803 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
803 msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
804 raise error.Abort(msg % options[b'zlib.level'])
804 raise error.Abort(msg % options[b'zlib.level'])
805 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
805 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
806 if options[b'zstd.level'] is not None:
806 if options[b'zstd.level'] is not None:
807 if not (0 <= options[b'zstd.level'] <= 22):
807 if not (0 <= options[b'zstd.level'] <= 22):
808 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
808 msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
809 raise error.Abort(msg % options[b'zstd.level'])
809 raise error.Abort(msg % options[b'zstd.level'])
810
810
811 if repository.NARROW_REQUIREMENT in requirements:
811 if repository.NARROW_REQUIREMENT in requirements:
812 options[b'enableellipsis'] = True
812 options[b'enableellipsis'] = True
813
813
814 return options
814 return options
815
815
816 def makemain(**kwargs):
816 def makemain(**kwargs):
817 """Produce a type conforming to ``ilocalrepositorymain``."""
817 """Produce a type conforming to ``ilocalrepositorymain``."""
818 return localrepository
818 return localrepository
819
819
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
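
# Example (illustrative, not from this file): because the factories are
# resolved through lambdas at call time, an extension can rebind the
# module-level name and have its wrapper picked up; ``mysubclassof`` is a
# hypothetical helper:
#
#   origmakemain = localrepo.makemain
#   def wrappedmakemain(**kwargs):
#       return mysubclassof(origmakemain(**kwargs))
#   localrepo.makemain = wrappedmakemain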

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
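
    # Examples (illustrative, not exhaustive): repo[None] yields a
    # workingctx, repo[0] the first revision, repo['tip'] the tip, a
    # 20-byte binary or 40-char hex string a specific node, and
    # repo[0:3] a list of changectx objects with filtered revisions
    # skipped.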

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
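
    # Example (illustrative): repo.revs('heads(%ld)', revlist) escapes a
    # list of integer revisions via the %ld formatspec code; the result
    # supports iteration and membership tests without building a full list.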

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
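
    # Example (illustrative): repo.anyrevs([b'mine()'], user=True,
    # localalias={b'mine': b'author(alice)'}) resolves mine() from the
    # local alias table, overriding any user-configured alias of that name.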

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
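
    # Example (illustrative): repo.hook('pretxncommit', throw=True,
    # node=hex(n), parent1=hex(p1), parent2=hex(p2)) runs the named hook
    # and raises on failure because throw=True.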

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it alone, as
                # it requires access to the parents' manifests. Preserve
                # them only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
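
    # Example (illustrative) hgrc configuration consumed by _loadfilter;
    # the section names 'encode' and 'decode' come from the callers below,
    # and a command of '!' disables filtering for matching patterns:
    #
    #   [encode]
    #   **.txt = unix2dos
    #   [decode]
    #   **.txt = dos2unix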
1676
1676
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

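    # Sketch of an extension registering a data filter (hypothetical names;
    # the trailing ':' follows the convention used by in-tree extensions):
    #
    #     def reposetup(ui, repo):
    #         def upper(s, cmd, ui=None, repo=None, filename=None, **kwargs):
    #             return s.upper()
    #         repo.adddatafilter('upper:', upper)
    #
    # after which a '[encode] **.txt = upper:' entry would route matching
    # files through it.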
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

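    # Illustrative round trip (assuming an existing `repo`): wread() applies
    # the 'encode' filters, while wwrite() applies the 'decode' filters and
    # honors the 'l' (symlink) and 'x' (executable) flags:
    #
    #     data = repo.wread('foo.txt')
    #     repo.wwrite('foo.txt', data, '')        # regular file
    #     repo.wwrite('foo.sh', data, 'x')        # executable bit set
    #     repo.wwrite('foo.lnk', 'target', 'l')   # symlink to 'target'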
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

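    # Illustrative caller-side sketch (not part of this module): the store
    # lock must be held around the transaction; a transaction left unclosed
    # is aborted on release, firing the 'txnabort' hook:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...           # mutate the store
    #             tr.close()    # success: pretxnclose/txnclose hooks run
    #         finally:
    #             tr.release()  # aborts if close() was never reached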
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

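    # Sketch of how an extension might augment cache warming (hypothetical
    # extension code built on the real extensions.wrapfunction() API):
    #
    #     def wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #         def extupdater(tr):
    #             updater(tr)
    #             ...  # warm extension-specific caches here
    #         return extupdater
    #     extensions.wrapfunction(localrepo.localrepository,
    #                             '_buildcacheupdater', wrapped)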
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

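    # For reference (illustrative): `hg debugupdatecaches` drives the full
    # path above, roughly equivalent to calling repo.updatecaches(full=True)
    # while holding the appropriate locks.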
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

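    # Illustrative sketch: txnclosehook() above relies on this to delay hook
    # execution until the outermost lock is released; with no lock held the
    # callback runs immediately:
    #
    #     repo._afterlock(lambda: repo.ui.status('fully unlocked\n'))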
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

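    # Illustrative lock-ordering sketch: callers needing both locks take
    # 'wlock' before 'lock', as commit() below does:
    #
    #     with repo.wlock(), repo.lock():
    #         ...  # safe to touch both the working copy and the store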
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

2428 @unfilteredmethod
2428 @unfilteredmethod
2429 def commit(self, text="", user=None, date=None, match=None, force=False,
2429 def commit(self, text="", user=None, date=None, match=None, force=False,
2430 editor=False, extra=None):
2430 editor=False, extra=None):
2431 """Add a new revision to current repository.
2431 """Add a new revision to current repository.
2432
2432
2433 Revision information is gathered from the working directory,
2433 Revision information is gathered from the working directory,
2434 match can be used to filter the committed files. If editor is
2434 match can be used to filter the committed files. If editor is
2435 supplied, it is called to get a commit message.
2435 supplied, it is called to get a commit message.
2436 """
2436 """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook fires
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

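    # A minimal usage sketch for commit(), assuming `repo` is an open
    # localrepository and the byte strings are placeholder values:
    #
    #   node = repo.commit(text=b'fix the frobnicator',
    #                      user=b'Alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status(b'nothing changed\n')
    #
    # A None return means the commit would have been empty and
    # ui.allowemptycommit was not set; otherwise the new changelog node
    # is returned.
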
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than what
        actually gets committed, since file nodes derived directly from
        p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

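    # commit() above is the usual driver for commitctx(). An extension
    # could also feed it an in-memory context directly; a hedged sketch,
    # assuming `repo` and a prepared context `cctx` (e.g. one built with
    # context.memctx) are in scope:
    #
    #   with repo.wlock(), repo.lock():
    #       node = repo.commitctx(cctx)
    #
    # The pretxncommit hook runs inside the transaction, so a hook
    # failure there rolls the new changeset back.
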
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

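    # The expected calling convention for the two methods above (a sketch
    # mirroring what strip and rollback do):
    #
    #   with repo.lock():
    #       repo.destroying()       # flush in-memory state first
    #       ...remove revisions...
    #       repo.destroyed()        # then repair the caches
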
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

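    # A usage sketch (placeholder repo): the returned status object
    # carries lists of file names, as consumed by commit() above:
    #
    #   st = repo.status()
    #   for f in st.modified + st.added + st.removed:
    #       repo.ui.write(b'%s\n' % f)
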
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

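    # A hedged sketch of registering a post-dirstate-status callback from
    # an extension (names are placeholders):
    #
    #   def fixup(wctx, status):
    #       # runs under wlock; consult wctx.repo().dirstate here rather
    #       # than a cached copy
    #       pass
    #
    #   repo.addpostdsstatus(fixup)
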
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

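    # Usage sketch (placeholder branch name): list the open heads of a
    # named branch, newest first:
    #
    #   for node in repo.branchheads(b'default'):
    #       repo.ui.write(b'%s\n' % hex(node))
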
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

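    # between() walks first parents from `top` towards `bottom` and keeps
    # the nodes at exponentially growing distances (1, 2, 4, 8, ...),
    # which lets discovery-style callers sample a long chain of history
    # cheaply. For a linear chain with top at r10 and bottom at r0, the
    # sampled nodes would sit 1, 2, 4 and 8 steps below the top.
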
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before pushing
        changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

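    # pushkey is the generic key/value push protocol; bookmarks are the
    # best-known namespace. A hedged sketch (placeholder values; the old
    # value must match for the update to be accepted):
    #
    #   ok = repo.pushkey(b'bookmarks', b'feature-x', oldvalue, newvalue)
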
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

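    # savecommitmessage() persists the message to .hg/last-message.txt so
    # it survives a rolled-back transaction; commit() above uses it as:
    #
    #   msgfn = repo.savecommitmessage(cctx._text)
    #   # on failure: "note: commit message saved in %s" % msgfn
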
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

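# For example, undoname('.hg/store/journal') returns '.hg/store/undo',
# and undoname('.hg/journal.dirstate') returns '.hg/undo.dirstate'.
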
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements

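# With a stock configuration (store, fncache, dotencode and generaldelta
# enabled, zlib compression), the function above would typically yield
# something like:
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
#
# plus the sparse-revlog requirement where that format option is
# enabled; the exact contents depend on the ui configuration.
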
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

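# For example (with a hypothetical option name):
#
#   filterknowncreateopts(ui, {'backend': 'revlogv1', 'frobnicate': True})
#
# returns {'frobnicate': True}, which makes createrepository() below
# abort with a hint that a required extension may not be loaded.
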
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
        The storage backend to use.
    lfs
        Repository will be created with ``lfs`` requirement. The lfs extension
        will automatically be loaded when the repository is accessed.
    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    shallowfilestore
        Indicates that storage for files should be shallow (not all ancestor
        revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

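# A minimal creation sketch (placeholder path), mirroring what instance()
# above does when asked to create a repository:
#
#   createrepository(ui, b'/path/to/new-repo')
#   repo = makelocalrepository(ui, b'/path/to/new-repo')
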
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
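
# After poisoning, every attribute access except close() raises; a sketch:
#
#   poisonrepository(repo)
#   repo.close()          # still allowed, a no-op
#   repo.changelog        # raises error.ProgrammingError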