repoview: improve documentation for `repo.filtered` method...
author: marmoute
changeset: r42274:e0357a46 (branch: default)
@@ -1,3106 +1,3116 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered.
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo already has the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

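# Editor's illustrative sketch (not part of upstream localrepo.py): probing
# the filecache without forcing the expensive property to be computed. The
# helper name is hypothetical; `repo` is assumed to be any localrepository
# instance.
def _example_peek_changelog(repo):
    # Returns the cached changelog if one was already loaded, else None.
    # Unlike accessing `repo.changelog`, this never triggers a load.
    cached, iscached = isfilecached(repo, 'changelog')
    return cached if iscached else None
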
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

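# Editor's illustrative sketch (not part of upstream localrepo.py): how the
# decorators above are meant to be used. The class is hypothetical; a real
# repo provides unfiltered() via repoview.
class _examplerepo(object):
    def unfiltered(self):
        # A real repo returns the view with all filtering removed; this
        # stand-in is already unfiltered.
        return self

    @unfilteredmethod
    def destroyed(self):
        # `self` here is always the unfiltered view, so caches keyed on
        # the unfiltered repo can be invalidated safely.
        return self

    @unfilteredpropertycache
    def _expensivestate(self):
        # Computed once and stored on the unfiltered repo's __dict__.
        return object()
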
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

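# Editor's illustrative sketch (not part of upstream localrepo.py): driving
# the executor through the context-manager protocol it implements above.
# `peer` is assumed to be a localpeer instance (see the class below).
def _example_heads(peer):
    with peer.commandexecutor() as e:
        # For local peers, callcommand() returns an already-resolved future.
        f = e.callcommand(b'heads', {})
        e.sendcommands()  # a no-op locally, but part of the interface
    return f.result()
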
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

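# Editor's illustrative sketch (not part of upstream localrepo.py): localpeer
# pins its repo to the 'served' view via repo.filtered('served') in __init__
# above, so hidden changesets never leak through the peer. The same mechanism
# is available to any caller; 'served' and 'visible' are standard repoview
# filter names.
def _example_served_heads(repo):
    served = repo.filtered('served')
    # Heads as a peer would see them: hidden (and, for 'served',
    # secret-phase) revisions are excluded by the view itself rather than
    # by ad-hoc checks at each call site.
    return served.heads()
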
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

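# Editor's illustrative sketch (not part of upstream localrepo.py): how an
# extension typically hooks featuresetupfuncs. The requirement string is
# hypothetical.
def _examplefeaturesetup(ui, features):
    # Declare that this (hypothetical) extension can open repositories
    # carrying the 'exp-example-feature' requirement.
    features.add(b'exp-example-feature')

# An extension module would register the hook at import time, e.g.:
#     featuresetupfuncs.add(_examplefeaturesetup)
# Only hooks defined in extensions loaded for the current ui are called
# (see gathersupportedrequirements below).
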
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')


    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

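# Editor's illustrative sketch (not part of upstream localrepo.py): opening a
# repository through the factory above. The path is hypothetical; callers
# normally go through hg.repository(), which layers extension integration on
# top of this function.
def _example_open(baseui):
    repo = makelocalrepository(baseui, b'/path/to/repo')
    # The returned object is an instance of a dynamically-derived type whose
    # bases were produced by the REPO_INTERFACES factory functions.
    return repo
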
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

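# Editor's illustrative sketch (not part of upstream localrepo.py): an
# extension layering extra config on top of the default loadhgrc via
# extensions.wrapfunction. The extra file name is hypothetical.
def _exampleloadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # Pull in a hypothetical secondary per-repo config file.
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded

# Registered from an extension's uisetup(), e.g.:
#     extensions.wrapfunction(localrepo, 'loadhgrc', _exampleloadhgrc)
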
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RequirementError`` if
    there exists any requirement in that set that currently loaded code
    doesn't recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

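# Editor's illustrative sketch (not part of upstream localrepo.py): the
# failure mode of the validator above, shown with throwaway requirement sets.
def _example_check():
    try:
        ensurerequirementsrecognized({b'revlogv1', b'exp-unknown-thing'},
                                     {b'revlogv1'})
    except error.RequirementError:
        # b'exp-unknown-thing' is not in the supported set, so opening
        # the repository would abort with the MissingRequirement hint.
        return False
    return True
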
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

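# Editor's illustrative sketch (not part of upstream localrepo.py): which
# store class a typical modern requirement set selects. Repos carrying
# 'store', 'fncache' and 'dotencode' get a fncachestore; the vfs factory
# mirrors the call in makelocalrepository above.
def _examplestore(storebasepath):
    requirements = {b'store', b'fncache', b'dotencode'}
    return makestore(requirements, storebasepath,
                     lambda base: vfsmod.vfs(base, cacheaudited=True))
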
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

850 # List of repository interfaces and factory functions for them. Each
850 # List of repository interfaces and factory functions for them. Each
851 # will be called in order during ``makelocalrepository()`` to iteratively
851 # will be called in order during ``makelocalrepository()`` to iteratively
852 # derive the final type for a local repository instance. We capture the
852 # derive the final type for a local repository instance. We capture the
853 # function as a lambda so we don't hold a reference and the module-level
853 # function as a lambda so we don't hold a reference and the module-level
854 # functions can be wrapped.
854 # functions can be wrapped.
855 REPO_INTERFACES = [
855 REPO_INTERFACES = [
856 (repository.ilocalrepositorymain, lambda: makemain),
856 (repository.ilocalrepositorymain, lambda: makemain),
857 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
857 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
858 ]
858 ]
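
# Illustrative sketch (not part of the original module): how the factory
# functions registered above can be combined into a single repository type,
# loosely mirroring what ``makelocalrepository()`` does. The helper name,
# the argument set, and the bare ``type()`` call are assumptions for this
# example only; the real derivation passes more state and validates each
# base against its interface.
def _example_derive_repo_type(requirements, features):
    bases = []
    for iface, mkfactory in REPO_INTERFACES:
        factory = mkfactory()  # unwrap the lambda indirection
        bases.append(factory(requirements=requirements, features=features))
    return type(r'derivedrepo', tuple(bases), {})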

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` path defining the storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example, calling `repo.filtered("served")` will return a repoview
        using the "served" view, regardless of the initial view used by
        `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
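
    # Illustrative example (not part of the original module): filtering is
    # not recursive, so chained calls still start from the unfiltered
    # repository::
    #
    #   visible = repo.filtered('visible')
    #   served = visible.filtered('served')  # a plain "served" view
    #   assert served.filtername == 'served'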

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
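
    # Illustrative example (not part of the original module): intersecting a
    # caller-supplied matcher with the narrowspec; the pattern here is
    # hypothetical::
    #
    #   m = matchmod.match(repo.root, '', ['glob:src/**'])
    #   nm = repo.narrowmatch(m, includeexact=True)
    #   # nm accepts paths matching both ``m`` and the narrowspec, plus any
    #   # paths listed explicitly in ``m.files()``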

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
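
    # Illustrative example (not part of the original module): the lookup
    # forms accepted above::
    #
    #   ctx = repo['tip']   # symbolic name
    #   ctx = repo[0]       # local revision number
    #   ctx = repo[node]    # 20-byte binary or 40-char hex nodeid
    #   wctx = repo[None]   # working directory context
    #   ctxs = repo[0:5]    # slices skip filtered revisions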

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)
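
    # Illustrative example (not part of the original module): %-formatting
    # keeps revset arguments properly escaped::
    #
    #   revs = repo.revs('ancestors(%d) and not public()', 0)
    #   nodes = [repo[r].node() for r in revs]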

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
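
    # Illustrative example (not part of the original module): expanding user
    # aliases while overriding one of them locally; the alias name and
    # definition are hypothetical::
    #
    #   revs = repo.anyrevs(['releases()'], user=True,
    #                       localalias={'releases': 'tag() and public()'})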

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
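
    # Illustrative example (not part of the original module): firing a
    # hypothetical custom hook; with ``throw=True`` a failing hook raises
    # instead of merely reporting a status::
    #
    #   repo.hook('myextension-postaction', throw=False, source='example')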

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        rev = self.changelog.rev
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
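
    # Illustrative example (not part of the original module): listing tags
    # with their types::
    #
    #   for name, node in sorted(repo.tags().iteritems()):
    #       kind = repo.tagtype(name)  # 'global', 'local', or None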

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
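
    # Illustrative example (not part of the original module): probing for a
    # branch that may not exist::
    #
    #   node = repo.branchtip('default', ignoremissing=True)
    #   # node is None when no such branch exists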

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
1686
1696
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

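    # A minimal round-trip sketch (illustrative only; ``repo`` is assumed to
    # be an existing localrepository and 'foo' a file in its working copy):
    # wread() applies the [encode] filters to working-directory content,
    # while wwrite() applies the [decode] filters before writing it back.
    #
    #   data = repo.wread('foo')           # content with [encode] applied
    #   n = repo.wwrite('foo', data, '')   # write back via [decode] filters
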
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

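    # Example use (a hedged sketch): code that may run inside or outside a
    # transaction can probe for one before opening its own.
    #
    #   tr = repo.currenttransaction()
    #   if tr is None:
    #       with repo.lock():                  # transactions require 'lock'
    #           with repo.transaction('example') as tr:
    #               ...
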
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
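        #
        # As an illustration (not shipped with Mercurial), a shell hook
        # consuming that file could look like:
        #
        #   [hooks]
        #   txnclose.tags = if [ -n "$HG_TAG_MOVED" ]; then \
        #       cat "`hg root`/.hg/changes/tags.changes"; fi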
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

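    # A minimal usage sketch (illustrative, not part of the original module):
    # a transaction must be opened while holding the store lock; nested calls
    # are served by tr.nest() as shown above.
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('example') as tr:
    #           ...   # store writes; rolled back if an exception escapes
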
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent the dirstateguard from overwriting the already-restored
            # backup
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

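    # Illustrative sketch: rollback(dryrun=True) only reports what would be
    # undone, so a caller can confirm before performing the real rollback.
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       repo.rollback(dryrun=False, force=False)
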
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

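    # Example (a hedged sketch): a full cache warm-up can be requested
    # explicitly outside any transaction, which is essentially what the
    # `hg debugupdatecaches` command does.
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
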
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

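    # Illustrative sketch: deferring work until the outermost lock is
    # released (the callback runs immediately when no lock is held).
    #
    #   def _notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(_notify)
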
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

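    # A hedged sketch of the lock ordering documented above: always take
    # 'wlock' before 'lock' to avoid the dead-lock hazard.
    #
    #   with repo.wlock():      # non-store parts of .hg
    #       with repo.lock():   # .hg/store
    #           ...             # safe to modify both
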
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

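    # Illustrative sketch (not part of localrepo.py): how commit() is
    # typically driven from an extension or script. `repo` and the message
    # below are hypothetical; commit() acquires the needed locks itself and
    # returns None when there is nothing to commit.
    #
    #   node = repo.commit(text=b'add feature X',
    #                      user=b'Jane Doe <jane@example.com>')
    #   if node is None:
    #       repo.ui.status(b'nothing changed\n')
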
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        files actually committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed changeset's files.
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

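    # Hypothetical extension sketch (not part of localrepo.py): a
    # post-dirstate-status callback that reports how many files were seen
    # as modified. Since the list is emptied after each status run, an
    # extension would re-register this around every dirstate.status call.
    #
    #   def _poststatus(wctx, status):
    #       # access the dirstate via wctx.repo().dirstate, never a cached copy
    #       wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))
    #   repo.addpostdsstatus(_poststatus)
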
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

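    # Hypothetical usage sketch (not part of localrepo.py): print the open
    # heads of the 'default' branch, newest first. `repo` is assumed to be
    # an existing localrepository instance; short() is imported above.
    #
    #   for node in repo.branchheads(b'default'):
    #       repo.ui.write(b'%s\n' % short(node))
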
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
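        # Walk the first-parent chain from top towards bottom, sampling
        # nodes at exponentially growing distances (1, 2, 4, ...); this
        # backs the legacy "between" wire-protocol command.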
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

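    # Hypothetical extension sketch (not part of localrepo.py): veto pushes
    # of more than 100 changesets. The hook source name 'myext' and the
    # limit are made up for illustration.
    #
    #   def _checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort(b'refusing to push %d changesets'
    #                             % len(pushop.outgoing.missing))
    #   repo.prepushoutgoinghooks.add(b'myext', _checkoutgoing)
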
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

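    # Hypothetical usage sketch (not part of localrepo.py): the pushkey
    # protocol is keyed by namespace; the built-in 'bookmarks' namespace
    # maps bookmark names to hex nodes. `newnode` is assumed to exist.
    #
    #   marks = repo.listkeys(b'bookmarks')
    #   oldvalue = marks.get(b'mybook', b'')
    #   repo.pushkey(b'bookmarks', b'mybook', oldvalue, hex(newnode))
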
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename can't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements

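# Hypothetical extension sketch (not part of localrepo.py): add a custom
# requirement to newly created repositories by wrapping the extension point
# documented above. 'myext-store' is a made-up requirement name.
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, ui, createopts):
#       requirements = orig(ui, createopts)
#       requirements.add('myext-store')
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)
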
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).

    A usage sketch follows the function body below.
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
    hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

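# Hypothetical usage sketch (not part of localrepo.py): create a repository
# whose store is shared with an existing one. `srcrepo`, the destination
# path, and the shared item set are assumptions made for illustration.
#
#   createrepository(ui, b'/path/to/new-repo',
#                    createopts={'sharedrepo': srcrepo,
#                                'sharedrelative': False,
#                                'shareditems': {b'bookmarks'}})
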
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
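
# Hypothetical usage sketch (not part of localrepo.py): after an unshare,
# callers poison the old instance so stale references fail loudly instead
# of silently operating on the wrong repository.
#
#   poisonrepository(oldrepo)
#   oldrepo.close()      # still allowed
#   oldrepo.changelog    # raises error.ProgrammingError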