localrepo: slightly simplify check for removed parents in _rollback()...
Martin von Zweigbergk
r41408:f1086a15 default draft
@@ -1,3075 +1,3074 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

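# Illustrative sketch (not part of the upstream hunk shown here): these
# classes are used as property decorators on localrepository. Upstream
# declares the bookmark store roughly like this:
#
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# The cached value is keyed on the stat info of .hg/bookmarks and is
# transparently recomputed when that file changes on disk.
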
def isfilecached(repo, name):
    """check if a repo already has the "name" filecache-ed property cached

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

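# Illustrative sketch (assumed usage): isfilecached() lets callers peek at a
# filecache-ed property without forcing it to be computed, e.g.:
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         ...  # inspect obj without triggering a load from disk
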
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

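# Illustrative sketch (assumed usage, mirroring how the decorator is applied
# further down in this module):
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # body always sees the unfiltered repo, never a repoview
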
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

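# Illustrative sketch (assumed caller-side usage of the executor API):
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         e.sendcommands()
#         node = f.result()
#
# For this local executor every future is already resolved when callcommand()
# returns, so sendcommands() is effectively a no-op.
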
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

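# Illustrative sketch (assumed extension-side registration; the requirement
# name is hypothetical):
#
#     def featuresetup(ui, supported):
#         supported |= {b'exp-myrequirement'}
#
#     localrepo.featuresetupfuncs.add(featuresetup)
#
# Because only callbacks defined in loaded extensions are consulted, a
# repository whose requirement belongs to a disabled extension still fails
# to open with an "unknown requirement" error.
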
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

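# Illustrative sketch (assumed caller-side usage; the path is hypothetical):
#
#     from mercurial import ui as uimod, localrepo
#     repo = localrepo.makelocalrepository(uimod.ui.load(), b'/path/to/repo')
#
# The resulting instance has a dynamically-derived type whose name encodes
# the requirements, e.g. "derivedrepo:/path/to/repo<fncache,revlogv1,store>".
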
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

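# Illustrative sketch (assumed extension-side wrapping; the wrapper name is
# hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
#         loaded = orig(ui, wdirvfs, hgvfs, requirements)
#         # pull in per-repo config from an alternate source here
#         return loaded
#
#     extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
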
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

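# Illustrative mapping (derived from the checks above):
#
#     {'store', 'fncache', 'dotencode'} -> fncachestore (with dotencode)
#     {'store'}                         -> encodedstore
#     (no 'store'; very old repos)      -> basicstore
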
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

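# Illustrative sketch (assumed shape of the resolved options for a repo with
# requirements {'revlogv1', 'generaldelta', 'sparserevlog'} and default
# config):
#
#     {b'flagprocessors': {}, b'revlogv1': True, b'generaldelta': True,
#      b'sparse-revlog': True, b'deltabothparents': ...,
#      b'maxchainlen': revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH, ...}
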
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

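# Illustrative sketch (assumed extension-side wrapping of a factory function;
# the storage class is hypothetical):
#
#     def _makefilestorage(orig, requirements, features, **kwargs):
#         if b'exp-myrequirement' in requirements:
#             return mycustomfilestorage
#         return orig(requirements, features, **kwargs)
#
#     extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)
#
# The lambda indirection above is what makes such wrapping effective: the
# module-level name is resolved when the factory is called, not at import.
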
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

879 # list of prefix for file which can be written without 'wlock'
879 # list of prefix for file which can be written without 'wlock'
880 # Extensions should extend this list when needed
880 # Extensions should extend this list when needed
881 _wlockfreeprefix = {
881 _wlockfreeprefix = {
882 # We migh consider requiring 'wlock' for the next
882 # We migh consider requiring 'wlock' for the next
883 # two, but pretty much all the existing code assume
883 # two, but pretty much all the existing code assume
884 # wlock is not needed so we keep them excluded for
884 # wlock is not needed so we keep them excluded for
885 # now.
885 # now.
886 'hgrc',
886 'hgrc',
887 'requires',
887 'requires',
888 # XXX cache is a complicatged business someone
888 # XXX cache is a complicatged business someone
889 # should investigate this in depth at some point
889 # should investigate this in depth at some point
890 'cache/',
890 'cache/',
891 # XXX shouldn't be dirstate covered by the wlock?
891 # XXX shouldn't be dirstate covered by the wlock?
892 'dirstate',
892 'dirstate',
893 # XXX bisect was still a bit too messy at the time
893 # XXX bisect was still a bit too messy at the time
894 # this changeset was introduced. Someone should fix
894 # this changeset was introduced. Someone should fix
895 # the remainig bit and drop this line
895 # the remainig bit and drop this line
896 'bisect.state',
896 'bisect.state',
897 }
897 }
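
    # Hedged example (not part of the class): an extension that maintains its
    # own lock-free state file could extend this prefix set from its setup
    # code. The extension and file names below are hypothetical.
    #
    #   def extsetup(ui):
    #       from mercurial import localrepo
    #       localrepo.localrepository._wlockfreeprefix.add('myext.state')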

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that the ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to the working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        # $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
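
    # Hedged illustration (not executed): with root '/repo' and a working
    # copy whose .hgsub declares 'sub', a call such as
    #
    #   repo._checknested('/repo/sub/dir/f.txt')
    #
    # walks prefixes 'sub/dir/f.txt' -> 'sub/dir' -> 'sub'; once 'sub' is
    # found in ctx.substate, the check is delegated to the subrepository via
    # sub.checknested('dir/f.txt'). The paths above are made up.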

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
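
    # Hedged sketch of the matcher algebra above (illustrative only): with a
    # narrowspec including only 'src/' and a user match for ['src/a', 'doc/b']
    # passed with includeexact=True, the effective matcher is roughly
    #
    #   intersect(usermatch, union(narrowmatch, exact(['src/a', 'doc/b'])))
    #
    # so 'doc/b' survives (to be warned about later) while everything else
    # outside 'src/' is filtered out. The example paths are invented.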

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
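
    # Hedged usage sketch for __getitem__ (illustrative; assumes a repo
    # object obtained via hg.repository()):
    #
    #   repo[None]      # workingctx for the working directory
    #   repo[0]         # changectx for revision 0
    #   repo['tip']     # symbolic identifiers: 'tip', 'null', '.'
    #   repo[node]      # 20-byte binary node or 40-char hex string
    #   repo[0:3]       # slice -> list of changectx, skipping filtered revs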

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
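
    # Hedged usage sketch for revs() (illustrative; assumes an existing repo):
    #
    #   for rev in repo.revs('heads() and not closed()'):
    #       ...  # integer revision numbers from a smartset
    #
    #   # %-formatting escapes caller-supplied values safely:
    #   repo.revs('ancestors(%d) and branch(%s)', 42, 'default')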

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
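
    # Hedged sketch for anyrevs() (illustrative; the 'release' alias below is
    # made up): resolve several revsets at once, overriding a user alias just
    # for this call:
    #
    #   revs = repo.anyrevs(['release', 'tip'], user=True,
    #                       localalias={'release': 'tag("re:^v")'})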

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
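
    # Hedged illustration of known() (not executed): for nodes [n1, n2, n3]
    # where n1 exists, n2 is unknown, and n3 is filtered (e.g. hidden), the
    # method returns [True, False, False]. This is what discovery uses when a
    # peer asks which of its nodes we already have. The names n1..n3 are
    # made up.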

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
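
    # Hedged configuration sketch for the filter machinery above. The
    # [encode]/[decode] sections and the pipe: prefix are Mercurial's standard
    # filter support; the gzip example mirrors the hgrc documentation. In an
    # hgrc:
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip
    #
    # _loadfilter('encode') compiles such entries into (matcher, fn, params)
    # triples; _filter() then runs the first matching command over the data.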

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
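
    # Hedged note on the flags handled by wwrite() (illustrative calls; the
    # filenames are made up):
    #
    #   repo.wwrite('bin/tool', data, 'x')    # regular file, exec bit set
    #   repo.wwrite('link', target, 'l')      # symlink pointing at ``target``
    #   repo.wwrite('doc.txt', data, '')      # plain file, exec bit cleared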

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track tag movement from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without any changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tag was
        # touched in this one. Changes are recorded in a line-based
        # format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the
            # data to be available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the
        # file is outdated when running hooks. As fncache is used for
        # streaming clones, this is not expected to break anything that
        # happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be
        # warm when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
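
    # A minimal usage sketch (hedged): callers take the store lock first
    # and then open a transaction; opening another transaction while one is
    # already running simply nests via tr.nest() above:
    #
    #     with repo.lock():
    #         with repo.transaction('my-operation') as tr:
    #             ...   # write to the store, set tr.hookargs, etc.
    #
    # The 'my-operation' description is arbitrary; it ends up in
    # journal.desc and in the txnname argument passed to hooks.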

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
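
    # Note (inferred from undoname(), defined elsewhere in this module):
    # when a transaction completes, each journal file is renamed to an
    # "undo" counterpart, e.g. 'journal.dirstate' -> 'undo.dirstate', which
    # is what _rollback() below restores from.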

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

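        # Note: the any() form below is equivalent to checking each dirstate
        # parent separately, i.e.
        #
        #     parents[0] not in nodemap or parents[1] not in nodemap
        #
        # but expresses the test as a single membership scan over the pair.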
        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroy(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to
        augment this logic. For this purpose, the created transaction is
        passed to the method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
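
    # A hedged extension sketch ('warmmycache' and the module layout are
    # hypothetical): because the updater is built by a method on the
    # repository class, an extension can wrap it to warm extra caches after
    # each transaction:
    #
    #     from mercurial import extensions, localrepo
    #
    #     def _wrapped(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #         def closure(tr):
    #             updater(tr)
    #             warmmycache(repo)   # hypothetical extra cache warming
    #         return closure
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(localrepo.localrepository,
    #                                 '_buildcacheupdater', _wrapped)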

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be
        used to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about
        have up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the
            # manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough
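
    # Informal note (hedged): the debug command `hg debugupdatecaches`
    # calls this method with full=True under the repository locks, forcing
    # even the lazily maintained caches (like the rev branch cache) to be
    # rebuilt.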

    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly reread the dirstate (i.e. restoring it to a
        previously known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We
                # don't want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
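
    # Example (sketch): this is the mechanism the txnclose hooks above rely
    # on; a callback registered while a lock is held runs only once the
    # outermost lock is released, otherwise it runs immediately:
    #
    #     def notify():
    #         repo.ui.status('fully unlocked\n')   # illustrative only
    #     repo._afterlock(notify)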

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
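
    # A minimal ordering sketch, matching the docstrings above: when both
    # locks are needed, 'wlock' is always taken before 'lock' so that two
    # processes cannot acquire them in opposite orders and deadlock:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction('example') as tr:
    #             ...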

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3    rev1 changes file foo
            #   \       /      rev2 renames foo to bar and changes it
            #    \- 2 -/       rev3 should have bar with all changes and
            #                  should record that bar descends from
            #                  bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3    rev4 reverts the content change from rev2
            #   \       /      merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to
            # find where the file copy came from if the source of a copy
            # was not in the parent directory. However, this doesn't
            # actually make sense to do (what does a copy from something
            # not in your working copy even mean?) and it causes bugs
            # (eg, issue4476). Instead, we will warn the user that copy
            # information was dropped, so if they didn't expect this
            # outcome it can be fixed, but this is the correct behavior
            # in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
                # hack for commands that use a temporary commit (eg:
                # histedit): the temporary commit may have been stripped
                # before the hook is run
                if self.changelog.hasnode(ret):
                    self.hook("commit", node=node, parent1=parent1,
                              parent2=parent2)
            self._afterlock(commithook)
            return ret
2493
2492
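    # A hedged usage sketch (not part of the original class): one way the
    # commit pipeline above could be driven from an extension. `repo` is
    # assumed to be an existing localrepository; the message and user are
    # placeholder values.
    #
    #     def demo_commit(repo):
    #         # commit() takes wlock/lock, runs the precommit hook, creates
    #         # the changeset, then schedules the commit hook
    #         return repo.commit(text=b'demo commit',
    #                            user=b'demo <demo@example.com>')
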
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0, we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

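    # A hedged usage sketch (not part of the original class): commitctx()
    # also backs in-memory commits, where a memctx supplies file data
    # without touching the working directory. All names below are
    # placeholders.
    #
    #     def demo_commitctx(repo):
    #         def filectxfn(repo, memctx, path):
    #             # return an in-memory file revision for `path`
    #             return context.memfilectx(repo, memctx, path,
    #                                       b'demo contents\n')
    #         mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                               b'demo: in-memory commit',
    #                               [b'demo.txt'], filectxfn,
    #                               user=b'demo <demo@example.com>')
    #         return repo.commitctx(mctx)
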
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

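    # A hedged sketch (not part of the original class) of how strip-like
    # operations are expected to bracket their work with the two methods
    # above; `strip_impl` is a placeholder for the actual destruction step.
    #
    #     def demo_destroy(repo, strip_impl):
    #         with repo.lock():
    #             repo.destroying()  # flush pending state (e.g. phasecache)
    #             strip_impl(repo)   # remove the doomed nodes
    #             repo.destroyed()   # filter and refresh caches
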
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

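    # A hedged sketch (not part of the original class): a post-dirstate-status
    # callback following the contract documented above -- it re-reads the
    # dirstate through wctx.repo() instead of capturing a stale copy.
    #
    #     def demo_register(repo):
    #         def fixup(wctx, status):
    #             # runs under wlock, when status fixups happen
    #             wctx.repo().ui.debug(b'%d modified files\n'
    #                                  % len(status.modified))
    #         repo.addpostdsstatus(fixup)
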
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

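    # A hedged sketch (not part of the original class): between() samples
    # each first-parent chain from `top` back toward `bottom` at
    # exponentially growing distances (indexes 1, 2, 4, 8, ...), which is
    # what the legacy "between" wire-protocol command expects.
    #
    #     def demo_between(repo):
    #         tip = repo.changelog.tip()
    #         # one list of sample nodes per (top, bottom) pair
    #         return repo.between([(tip, nullid)])
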
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called before pushing
        changesets, with a pushop exposing repo, remote and outgoing.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

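    # A hedged sketch (not part of the original class): moving a bookmark
    # through the pushkey protocol. The bookmark name is a placeholder;
    # old/new are hex nodes ('' for a previously absent key).
    #
    #     def demo_pushkey(repo, oldhex, newhex):
    #         # runs the prepushkey hook, updates the key, then schedules
    #         # the pushkey hook to fire after the lock is released
    #         return repo.pushkey(b'bookmarks', b'demo-mark', oldhex, newhex)
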
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

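# A hedged sketch (not part of the original module): undoname() maps a
# transaction journal file to the undo file kept after the transaction
# closes; the path below is a placeholder.
def _demoundoname():
    # 'journal.phaseroots' becomes 'undo.phaseroots' in the same directory
    return undoname('.hg/store/journal.phaseroots')
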
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements

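# A hedged sketch (not part of the original module): what a stock
# configuration typically gets back from newreporequirements(); the exact
# set depends on the ui's format.* settings.
def _demorequirements(ui):
    reqs = newreporequirements(ui, defaultcreateopts(ui))
    # commonly: {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
    return sorted(reqs)
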
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

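# A hedged sketch (not part of the original module): creating a new
# lfs-enabled repository and opening it; the path is a placeholder.
def _democreate(ui):
    createrepository(ui, b'/tmp/demo-repo', createopts={'lfs': True})
    return instance(ui, b'/tmp/demo-repo', create=False)
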
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
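
# A hedged sketch (not part of the original module): once a repository is
# unshared, poisoning makes any lingering reference fail loudly.
def _demopoison(repo):
    poisonrepository(repo)
    repo.close()  # close() remains callable
    # any other attribute access now raises error.ProgrammingError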