localrepo: resolve store and cachevfs in makelocalrepository()...
Gregory Szorc
r39733:98ca9078 default
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2678 +1,2703 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
-    store,
+    store as storemod,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )

 from .revlogutils import (
     constants as revlogconst,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()

 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(_basefilecache, self).__delete__(repo.unfiltered())

 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)

 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))

     def join(self, obj, fname):
         return obj.sjoin(fname)

 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True

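Note: the two filecache decorators above differ only in which vfs resolves the
watched path: repofilecache joins against .hg/ while storecache joins against
the store. A minimal sketch of typical usage; the changelog property mirrors
real usage elsewhere in this file, and isfilecached() lets callers probe the
cache without forcing a load:

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs)

    # Probe without populating:
    obj, cached = isfilecached(repo, 'changelog')
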
 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repo only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})

 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalue, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'sendcommands()')

         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'close()')

         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))

         f = pycompat.futures.Future()

         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)

         return f

     def sendcommands(self):
         self._sent = True

     def close(self):
         self._closed = True

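Note: localcommandexecutor mirrors the wire-protocol executor API on top of
direct method calls, so every command resolves synchronously into an
already-completed future. A sketch of the calling convention (the 'lookup'
command and its 'key' argument follow the peer command interface; 'lookup' is
one of the moderncaps above):

    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
    node = f.result()  # re-raises here if the command raised
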
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()

         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)

     # Begin of _basepeer interface.

     def url(self):
         return self._repo.url()

     def local(self):
         return self._repo

     def peer(self):
         return self

     def canpush(self):
         return True

     def close(self):
         self._repo.close()

     # End of _basepeer interface.

     # Begin of _basewirecommands interface.

     def branchmap(self):
         return self._repo.branchmap()

     def capabilities(self):
         return self._caps

     def clonebundles(self):
         return self._repo.tryread('clonebundles.manifest')

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                    pycompat.bytestr(four),
                                    pycompat.bytestr(five))

     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)[1]
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)

     def heads(self):
         return self._repo.heads()

     def known(self, nodes):
         return self._repo.known(nodes)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def lookup(self, key):
         return self._repo.lookup(key)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def stream_out(self):
         raise error.Abort(_('cannot perform stream clone against local '
                             'peer'))

     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'),
                                       stringutil.forcebytestr(exc))

     # End of _basewirecommands interface.

     # Begin of peer interface.

     def commandexecutor(self):
         return localcommandexecutor(self)

     # End of peer interface.

 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)

     # Begin of baselegacywirecommands interface.

     def between(self, pairs):
         return self._repo.between(pairs)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                       missingheads=self._repo.heads())
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)

     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                       missingheads=heads)
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)

     # End of baselegacywirecommands interface.

 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
 # pieces, so that each piece isn't too big.
 # Without the sparserevlog capability, reading from the repository could use
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()

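Note: a sketch of how an extension might hook featuresetupfuncs. The extension
and requirement names here are made up; the registration pattern is the one
this mechanism expects:

    # hypothetical extension module
    from mercurial import localrepo

    def featuresetup(ui, features):
        features.add(b'exp-myextension-feature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)

Because gathersupportedrequirements() below only runs functions whose defining
module belongs to a loaded extension, registrations from unloaded extensions
are ignored.
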
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.

     Given arguments needed to construct a local repository, this function
     derives a type suitable for representing that repository and returns an
     instance of it.

     The returned object conforms to the ``repository.completelocalrepository``
     interface.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy

     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise

         raise error.RepoError(_(b'repository %s not found') % path)

     # .hg/requires file contains a newline-delimited list of
     # features/capabilities the opener (us) must have in order to use
     # the repository. This file was introduced in Mercurial 0.9.2,
     # which means very old repositories may not have one. We assume
     # a missing file translates to no requirements.
     try:
         requirements = set(hgvfs.read(b'requires').splitlines())
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         requirements = set()

     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
     except IOError:
         pass
     else:
         extensions.loadall(ui)

     supportedrequirements = gathersupportedrequirements(ui)

     # We first validate the requirements are known.
     ensurerequirementsrecognized(requirements, supportedrequirements)

     # Then we validate that the known set is reasonable to use together.
     ensurerequirementscompatible(ui, requirements)

     # TODO there are unhandled edge cases related to opening repositories with
     # shared storage. If storage is shared, we should also test for requirements
     # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
     # that repo, as that repo may load extensions needed to open it. This is a
     # bit complicated because we don't want the other hgrc to overwrite settings
     # in this hgrc.
     #
     # This bug is somewhat mitigated by the fact that we copy the .hg/requires
     # file when sharing repos. But if a requirement is added after the share is
     # performed, thereby introducing a new requirement for the opener, we
     # will not see that and could encounter a run-time error interacting with
     # that shared store since it has an unknown-to-us requirement.

     # At this point, we know we should be capable of opening the repository.
     # Now get on with doing that.

+    # The "store" part of the repository holds versioned data. How it is
+    # accessed is determined by various requirements. The ``shared`` or
+    # ``relshared`` requirements indicate the store lives in the path contained
+    # in the ``.hg/sharedpath`` file. This is an absolute path for
+    # ``shared`` and relative to ``.hg/`` for ``relshared``.
+    if b'shared' in requirements or b'relshared' in requirements:
+        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
+        if b'relshared' in requirements:
+            sharedpath = hgvfs.join(sharedpath)
+
+        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
+
+        if not sharedvfs.exists():
+            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
+                                    b'directory %s') % sharedvfs.base)
+
+        storebasepath = sharedvfs.base
+        cachepath = sharedvfs.join(b'cache')
+    else:
+        storebasepath = hgvfs.base
+        cachepath = hgvfs.join(b'cache')
+
+    # The store has changed over time and the exact layout is dictated by
+    # requirements. The store interface abstracts differences across all
+    # of them.
+    store = storemod.store(requirements, storebasepath,
+                           lambda base: vfsmod.vfs(base, cacheaudited=True))
+
+    hgvfs.createmode = store.createmode
+
+    # The cache vfs is used to manage cache files.
+    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
+    cachevfs.createmode = store.createmode
+
     return localrepository(
         baseui=baseui,
         ui=ui,
         origroot=path,
         wdirvfs=wdirvfs,
         hgvfs=hgvfs,
         requirements=requirements,
         supportedrequirements=supportedrequirements,
+        sharedpath=storebasepath,
+        store=store,
+        cachevfs=cachevfs,
         intents=intents)

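Note: the store resolution added above boils down to three cases. A condensed
sketch of the same logic (the helper name is hypothetical; the branch bodies
follow the hunk above):

    def resolvestorebase(hgvfs, requirements):
        # 'shared':    .hg/sharedpath holds an absolute path to another .hg/
        # 'relshared': .hg/sharedpath holds a path relative to this .hg/
        # otherwise:   the store lives under this repo's own .hg/
        if b'shared' in requirements or b'relshared' in requirements:
            sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
            if b'relshared' in requirements:
                sharedpath = hgvfs.join(sharedpath)
            return sharedpath
        return hgvfs.base
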
 def gathersupportedrequirements(ui):
     """Determine the complete set of recognized requirements."""
     # Start with all requirements supported by this file.
     supported = set(localrepository._basesupported)

     # Execute ``featuresetupfuncs`` entries if they belong to an extension
     # relevant to this ui instance.
     modules = {m.__name__ for n, m in extensions.extensions(ui)}

     for fn in featuresetupfuncs:
         if fn.__module__ in modules:
             fn(ui, supported)

     # Add derived requirements from registered compression engines.
     for name in util.compengines:
         engine = util.compengines[name]
         if engine.revlogheader():
             supported.add(b'exp-compression-%s' % name)

     return supported

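Note: the derived names line up with the 'exp-compression-' prefix consumed by
_applyopenerreqs() further down. A sketch, assuming a zstd engine that is
present and advertises a revlog header:

    supported = gathersupportedrequirements(ui)
    assert b'store' in supported                 # from _basesupported
    assert b'exp-compression-zstd' in supported  # derived from the engine
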
 def ensurerequirementsrecognized(requirements, supported):
     """Validate that a set of local requirements is recognized.

     Receives a set of requirements. Raises an ``error.RepoError`` if there
     exists any requirement in that set that currently loaded code doesn't
     recognize.

     Returns a set of supported requirements.
     """
     missing = set()

     for requirement in requirements:
         if requirement in supported:
             continue

         if not requirement or not requirement[0:1].isalnum():
             raise error.RequirementError(_(b'.hg/requires file is corrupt'))

         missing.add(requirement)

     if missing:
         raise error.RequirementError(
             _(b'repository requires features unknown to this Mercurial: %s') %
             b' '.join(sorted(missing)),
             hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                    b'for more information'))

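Note: a sketch of the failure mode this guards against (the requirement name
is invented):

    ensurerequirementsrecognized({b'exp-made-up-feature'}, supported)
    # -> error.RequirementError: repository requires features unknown to
    #    this Mercurial: exp-made-up-feature
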
 def ensurerequirementscompatible(ui, requirements):
     """Validates that a set of recognized requirements is mutually compatible.

     Some requirements may not be compatible with others or require
     config options that aren't enabled. This function is called during
     repository opening to ensure that the set of requirements needed
     to open a repository is sane and compatible with config options.

     Extensions can monkeypatch this function to perform additional
     checking.

     ``error.RepoError`` should be raised on failure.
     """
     if b'exp-sparse' in requirements and not sparse.enabled:
         raise error.RepoError(_(b'repository is using sparse feature but '
                                 b'sparse is not enabled; enable the '
                                 b'"sparse" extension to access'))

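Note: a sketch of the monkeypatching the docstring invites, via the standard
wrapfunction helper (the extension, config knob, and requirement name are
hypothetical):

    from mercurial import error, extensions, localrepo

    def _compatcheck(orig, ui, requirements):
        orig(ui, requirements)
        if b'exp-myfeature' in requirements and not ui.configbool(
                b'myext', b'enabled'):
            raise error.RepoError(b'enable myext to open this repository')

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'ensurerequirementscompatible',
                                _compatcheck)
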
 @interfaceutil.implementer(repository.completelocalrepository)
 class localrepository(object):

     # obsolete experimental requirements:
     # - manifestv2: An experimental new manifest format that allowed
     #   for stem compression of long paths. Experiment ended up not
     #   being successful (repository sizes went up due to worse delta
     #   chains), and the code was deleted in 4.6.
     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         REVLOGV2_REQUIREMENT,
         SPARSEREVLOG_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
         'internal-phase'
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
     }

     # list of prefixes for files which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We might consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assumes
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicated business someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't be dirstate covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remaining bit and drop this line
         'bisect.state',
     }

     def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
-                 supportedrequirements, intents=None):
+                 supportedrequirements, sharedpath, store, cachevfs,
+                 intents=None):
         """Create a new local repository instance.

         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
         or ``localrepo.makelocalrepository()`` for obtaining a new repository
         object.

         Arguments:

         baseui
            ``ui.ui`` instance that ``ui`` argument was based off of.

         ui
            ``ui.ui`` instance for use by the repository.

         origroot
            ``bytes`` path to working directory root of this repository.

         wdirvfs
            ``vfs.vfs`` rooted at the working directory.

         hgvfs
            ``vfs.vfs`` rooted at .hg/

         requirements
            ``set`` of bytestrings representing repository opening requirements.

         supportedrequirements
            ``set`` of bytestrings representing repository requirements that we
            know how to open. May be a superset of ``requirements``.

+        sharedpath
+           ``bytes`` Defining path to storage base directory. Points to a
+           ``.hg/`` directory somewhere.
+
+        store
+           ``store.basicstore`` (or derived) instance providing access to
+           versioned storage.
+
+        cachevfs
+           ``vfs.vfs`` used for cache files.
+
         intents
            ``set`` of system strings indicating what this repo will be used
            for.
         """
         self.baseui = baseui
         self.ui = ui
         self.origroot = origroot
         # vfs rooted at working directory.
         self.wvfs = wdirvfs
         self.root = wdirvfs.base
         # vfs rooted at .hg/. Used to access most non-store paths.
         self.vfs = hgvfs
         self.path = hgvfs.base
         self.requirements = requirements
         self.supported = supportedrequirements
+        self.sharedpath = sharedpath
+        self.store = store
+        self.cachevfs = cachevfs

         self.filtername = None
-        # svfs: usually rooted at .hg/store, used to access repository history
-        # If this is a shared repository, this vfs may point to another
-        # repository's .hg/store directory.
-        self.svfs = None

         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup
         self._phasedefaults = []

         color.setup(self.ui)

-        cachepath = self.vfs.join('cache')
-        self.sharedpath = self.path
-        try:
-            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
-            if 'relshared' in self.requirements:
-                sharedpath = self.vfs.join(sharedpath)
-            vfs = vfsmod.vfs(sharedpath, realpath=True)
-            cachepath = vfs.join('cache')
-            s = vfs.base
-            if not vfs.exists():
-                raise error.RepoError(
-                    _('.hg/sharedpath points to nonexistent directory %s') % s)
-            self.sharedpath = s
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-
-        self.store = store.store(
-            self.requirements, self.sharedpath,
-            lambda base: vfsmod.vfs(base, cacheaudited=True))
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
-        self.vfs.createmode = self.store.createmode
-        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
-        self.cachevfs.createmode = self.store.createmode
673 if (self.ui.configbool('devel', 'all-warnings') or
698 if (self.ui.configbool('devel', 'all-warnings') or
674 self.ui.configbool('devel', 'check-locks')):
699 self.ui.configbool('devel', 'check-locks')):
675 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
700 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
676 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
701 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
677 else: # standard vfs
702 else: # standard vfs
678 self.svfs.audit = self._getsvfsward(self.svfs.audit)
703 self.svfs.audit = self._getsvfsward(self.svfs.audit)
679 self._applyopenerreqs()
704 self._applyopenerreqs()
680
705
681 self._dirstatevalidatewarned = False
706 self._dirstatevalidatewarned = False
682
707
683 self._branchcaches = {}
708 self._branchcaches = {}
684 self._revbranchcache = None
709 self._revbranchcache = None
685 self._filterpats = {}
710 self._filterpats = {}
686 self._datafilters = {}
711 self._datafilters = {}
687 self._transref = self._lockref = self._wlockref = None
712 self._transref = self._lockref = self._wlockref = None
688
713
689 # A cache for various files under .hg/ that tracks file changes,
714 # A cache for various files under .hg/ that tracks file changes,
690 # (used by the filecache decorator)
715 # (used by the filecache decorator)
691 #
716 #
692 # Maps a property name to its util.filecacheentry
717 # Maps a property name to its util.filecacheentry
693 self._filecache = {}
718 self._filecache = {}
694
719
695 # hold sets of revision to be filtered
720 # hold sets of revision to be filtered
696 # should be cleared when something might have changed the filter value:
721 # should be cleared when something might have changed the filter value:
697 # - new changesets,
722 # - new changesets,
698 # - phase change,
723 # - phase change,
699 # - new obsolescence marker,
724 # - new obsolescence marker,
700 # - working directory parent change,
725 # - working directory parent change,
701 # - bookmark changes
726 # - bookmark changes
702 self.filteredrevcache = {}
727 self.filteredrevcache = {}
703
728
704 # post-dirstate-status hooks
729 # post-dirstate-status hooks
705 self._postdsstatus = []
730 self._postdsstatus = []
706
731
707 # generic mapping between names and nodes
732 # generic mapping between names and nodes
708 self.names = namespaces.namespaces()
733 self.names = namespaces.namespaces()
709
734
710 # Key to signature value.
735 # Key to signature value.
711 self._sparsesignaturecache = {}
736 self._sparsesignaturecache = {}
712 # Signature to cached matcher instance.
737 # Signature to cached matcher instance.
713 self._sparsematchercache = {}
738 self._sparsematchercache = {}
714
739
715 def _getvfsward(self, origfunc):
740 def _getvfsward(self, origfunc):
716 """build a ward for self.vfs"""
741 """build a ward for self.vfs"""
717 rref = weakref.ref(self)
742 rref = weakref.ref(self)
718 def checkvfs(path, mode=None):
743 def checkvfs(path, mode=None):
719 ret = origfunc(path, mode=mode)
744 ret = origfunc(path, mode=mode)
720 repo = rref()
745 repo = rref()
721 if (repo is None
746 if (repo is None
722 or not util.safehasattr(repo, '_wlockref')
747 or not util.safehasattr(repo, '_wlockref')
723 or not util.safehasattr(repo, '_lockref')):
748 or not util.safehasattr(repo, '_lockref')):
724 return
749 return
725 if mode in (None, 'r', 'rb'):
750 if mode in (None, 'r', 'rb'):
726 return
751 return
727 if path.startswith(repo.path):
752 if path.startswith(repo.path):
728 # truncate name relative to the repository (.hg)
753 # truncate name relative to the repository (.hg)
729 path = path[len(repo.path) + 1:]
754 path = path[len(repo.path) + 1:]
730 if path.startswith('cache/'):
755 if path.startswith('cache/'):
731 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
756 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
732 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
757 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
733 if path.startswith('journal.'):
758 if path.startswith('journal.'):
734 # journal is covered by 'lock'
759 # journal is covered by 'lock'
735 if repo._currentlock(repo._lockref) is None:
760 if repo._currentlock(repo._lockref) is None:
736 repo.ui.develwarn('write with no lock: "%s"' % path,
761 repo.ui.develwarn('write with no lock: "%s"' % path,
737 stacklevel=2, config='check-locks')
762 stacklevel=2, config='check-locks')
738 elif repo._currentlock(repo._wlockref) is None:
763 elif repo._currentlock(repo._wlockref) is None:
739 # rest of vfs files are covered by 'wlock'
764 # rest of vfs files are covered by 'wlock'
740 #
765 #
741 # exclude special files
766 # exclude special files
742 for prefix in self._wlockfreeprefix:
767 for prefix in self._wlockfreeprefix:
743 if path.startswith(prefix):
768 if path.startswith(prefix):
744 return
769 return
745 repo.ui.develwarn('write with no wlock: "%s"' % path,
770 repo.ui.develwarn('write with no wlock: "%s"' % path,
746 stacklevel=2, config='check-locks')
771 stacklevel=2, config='check-locks')
747 return ret
772 return ret
748 return checkvfs
773 return checkvfs
749
774
750 def _getsvfsward(self, origfunc):
775 def _getsvfsward(self, origfunc):
751 """build a ward for self.svfs"""
776 """build a ward for self.svfs"""
752 rref = weakref.ref(self)
777 rref = weakref.ref(self)
753 def checksvfs(path, mode=None):
778 def checksvfs(path, mode=None):
754 ret = origfunc(path, mode=mode)
779 ret = origfunc(path, mode=mode)
755 repo = rref()
780 repo = rref()
756 if repo is None or not util.safehasattr(repo, '_lockref'):
781 if repo is None or not util.safehasattr(repo, '_lockref'):
757 return
782 return
758 if mode in (None, 'r', 'rb'):
783 if mode in (None, 'r', 'rb'):
759 return
784 return
760 if path.startswith(repo.sharedpath):
785 if path.startswith(repo.sharedpath):
761 # truncate name relative to the repository (.hg)
786 # truncate name relative to the repository (.hg)
762 path = path[len(repo.sharedpath) + 1:]
787 path = path[len(repo.sharedpath) + 1:]
763 if repo._currentlock(repo._lockref) is None:
788 if repo._currentlock(repo._lockref) is None:
764 repo.ui.develwarn('write with no lock: "%s"' % path,
789 repo.ui.develwarn('write with no lock: "%s"' % path,
765 stacklevel=3)
790 stacklevel=3)
766 return ret
791 return ret
767 return checksvfs
792 return checksvfs
768
793
769 def close(self):
794 def close(self):
770 self._writecaches()
795 self._writecaches()
771
796
772 def _writecaches(self):
797 def _writecaches(self):
773 if self._revbranchcache:
798 if self._revbranchcache:
774 self._revbranchcache.write()
799 self._revbranchcache.write()
775
800
776 def _restrictcapabilities(self, caps):
801 def _restrictcapabilities(self, caps):
777 if self.ui.configbool('experimental', 'bundle2-advertise'):
802 if self.ui.configbool('experimental', 'bundle2-advertise'):
778 caps = set(caps)
803 caps = set(caps)
779 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
804 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
780 role='client'))
805 role='client'))
781 caps.add('bundle2=' + urlreq.quote(capsblob))
806 caps.add('bundle2=' + urlreq.quote(capsblob))
782 return caps
807 return caps
783
808
784 def _applyopenerreqs(self):
809 def _applyopenerreqs(self):
785 self.svfs.options = dict((r, 1) for r in self.requirements
810 self.svfs.options = dict((r, 1) for r in self.requirements
786 if r in self.openerreqs)
811 if r in self.openerreqs)
787 # experimental config: format.chunkcachesize
812 # experimental config: format.chunkcachesize
788 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
813 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
789 if chunkcachesize is not None:
814 if chunkcachesize is not None:
790 self.svfs.options['chunkcachesize'] = chunkcachesize
815 self.svfs.options['chunkcachesize'] = chunkcachesize
791 # experimental config: format.manifestcachesize
816 # experimental config: format.manifestcachesize
792 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
817 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
793 if manifestcachesize is not None:
818 if manifestcachesize is not None:
794 self.svfs.options['manifestcachesize'] = manifestcachesize
819 self.svfs.options['manifestcachesize'] = manifestcachesize
795 deltabothparents = self.ui.configbool('storage',
820 deltabothparents = self.ui.configbool('storage',
796 'revlog.optimize-delta-parent-choice')
821 'revlog.optimize-delta-parent-choice')
797 self.svfs.options['deltabothparents'] = deltabothparents
822 self.svfs.options['deltabothparents'] = deltabothparents
798 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
823 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
799 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
824 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
800 if 0 <= chainspan:
825 if 0 <= chainspan:
801 self.svfs.options['maxdeltachainspan'] = chainspan
826 self.svfs.options['maxdeltachainspan'] = chainspan
802 mmapindexthreshold = self.ui.configbytes('experimental',
827 mmapindexthreshold = self.ui.configbytes('experimental',
803 'mmapindexthreshold')
828 'mmapindexthreshold')
804 if mmapindexthreshold is not None:
829 if mmapindexthreshold is not None:
805 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
830 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
806 withsparseread = self.ui.configbool('experimental', 'sparse-read')
831 withsparseread = self.ui.configbool('experimental', 'sparse-read')
807 srdensitythres = float(self.ui.config('experimental',
832 srdensitythres = float(self.ui.config('experimental',
808 'sparse-read.density-threshold'))
833 'sparse-read.density-threshold'))
809 srmingapsize = self.ui.configbytes('experimental',
834 srmingapsize = self.ui.configbytes('experimental',
810 'sparse-read.min-gap-size')
835 'sparse-read.min-gap-size')
811 self.svfs.options['with-sparse-read'] = withsparseread
836 self.svfs.options['with-sparse-read'] = withsparseread
812 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
837 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
813 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
838 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
814 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
839 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
815 self.svfs.options['sparse-revlog'] = sparserevlog
840 self.svfs.options['sparse-revlog'] = sparserevlog
816 if sparserevlog:
841 if sparserevlog:
817 self.svfs.options['generaldelta'] = True
842 self.svfs.options['generaldelta'] = True
818 maxchainlen = None
843 maxchainlen = None
819 if sparserevlog:
844 if sparserevlog:
820 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
845 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
821 # experimental config: format.maxchainlen
846 # experimental config: format.maxchainlen
822 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
847 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
823 if maxchainlen is not None:
848 if maxchainlen is not None:
824 self.svfs.options['maxchainlen'] = maxchainlen
849 self.svfs.options['maxchainlen'] = maxchainlen
825
850
826 for r in self.requirements:
851 for r in self.requirements:
827 if r.startswith('exp-compression-'):
852 if r.startswith('exp-compression-'):
828 self.svfs.options['compengine'] = r[len('exp-compression-'):]
853 self.svfs.options['compengine'] = r[len('exp-compression-'):]
829
854
830 # TODO move "revlogv2" to openerreqs once finalized.
855 # TODO move "revlogv2" to openerreqs once finalized.
831 if REVLOGV2_REQUIREMENT in self.requirements:
856 if REVLOGV2_REQUIREMENT in self.requirements:
832 self.svfs.options['revlogv2'] = True
857 self.svfs.options['revlogv2'] = True
833
858
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

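    # Illustrative walk of the prefix loop above (hypothetical paths, not
    # taken from a real repository):
    #
    #   >>> parts = util.splitpath('sub/dir/f.txt')
    #   >>> parts
    #   ['sub', 'dir', 'f.txt']
    #
    # Prefixes are tried longest first ('sub/dir/f.txt', then 'sub/dir',
    # then 'sub'); the first one present in ctx.substate decides whether
    # the nested path is legal.
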
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

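    # Usage sketch (hypothetical caller code): 'visible' is a standard
    # repoview filter name that hides hidden (e.g. obsolete) changesets.
    #
    #   view = repo.filtered('visible')   # hidden changesets excluded
    #   unfi = view.unfiltered()          # back to the raw repository
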
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

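    # Indexing sketch (hypothetical caller code; identifiers invented):
    #
    #   ctx = repo['tip']        # symbol, rev number, or node all work
    #   wctx = repo[None]        # the working directory context
    #   ctxs = repo[0:5]         # a list of changectx for a rev slice
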
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

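    # Usage sketch (hypothetical revset and branch name), relying on the
    # %-formatting escape described in the docstring above:
    #
    #   revs = repo.revs('heads(branch(%s)) and not public()', 'default')
    #   for rev in revs:              # integer revision numbers
    #       node = repo[rev].hex()
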
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

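    # Usage sketch (hypothetical specs; the 'drafts' alias is invented
    # here via localalias, it is not a built-in revset):
    #
    #   revs = repo.anyrevs(['drafts()', 'bookmark()'], user=True,
    #                       localalias={'drafts': 'draft() - obsolete()'})
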
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

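    # Usage sketch for the tag APIs above (hypothetical tag name 'v1.0'):
    #
    #   node = repo.tags().get('v1.0')     # tag name -> node, or None
    #   kind = repo.tagtype('v1.0')        # 'global', 'local', or None
    #   names = repo.nodetags(node)        # sorted tag names, if node set
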
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

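    # Usage sketch (hypothetical branch name): with ignoremissing=True a
    # missing branch yields an implicit None instead of RepoLookupError.
    #
    #   tip = repo.branchtip('maybe-missing', ignoremissing=True)
    #   if tip is None:
    #       pass  # branch does not exist
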
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve
                # them only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

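    # A sketch of the config this reads: ``filter`` is a section name such
    # as 'encode' or 'decode', and each item maps a file pattern to a
    # filter command (the pattern and command below are hypothetical):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    # A command of '!' disables a filter inherited from another hgrc.
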
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

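    # Flag semantics sketch (hypothetical calls): 'l' writes a symlink
    # whose target is ``data``; 'x' marks a regular file executable; ''
    # writes a plain file and clears the executable bit.
    #
    #   repo.wwrite('script.sh', b'#!/bin/sh\n', 'x')
    #   repo.wwrite('link', b'target', 'l')
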
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based
        # format (see the illustrative parser sketch below)::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
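        #
        # Illustrative parser for that format (a standalone sketch under
        # the assumptions above; ``line`` is one line of tags.changes, and
        # tag names may themselves contain spaces, hence the maxsplit):
        #
        #   def parsetagmove(line):
        #       action, hexnode, tagname = line.rstrip('\n').split(' ', 2)
        #       assert action in ('-R', '+A', '-M', '+M')
        #       return action, hexnode, tagname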
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the
            # 'tr.hookargs' dict is copied before these run. In addition,
            # we need the data available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be
        # warm when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

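    # Typical caller pattern (a hypothetical sketch; real callers are
    # commands and exchange code that already hold the store lock):
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # write changelog/manifest/store files under tr
    #       # on normal exit the transaction is closed; on an exception it
    #       # is aborted and the journal is replayed to undo the changes
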
1594 def _journalfiles(self):
1619 def _journalfiles(self):
1595 return ((self.svfs, 'journal'),
1620 return ((self.svfs, 'journal'),
1596 (self.vfs, 'journal.dirstate'),
1621 (self.vfs, 'journal.dirstate'),
1597 (self.vfs, 'journal.branch'),
1622 (self.vfs, 'journal.branch'),
1598 (self.vfs, 'journal.desc'),
1623 (self.vfs, 'journal.desc'),
1599 (self.vfs, 'journal.bookmarks'),
1624 (self.vfs, 'journal.bookmarks'),
1600 (self.svfs, 'journal.phaseroots'))
1625 (self.svfs, 'journal.phaseroots'))
1601
1626
1602 def undofiles(self):
1627 def undofiles(self):
1603 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1628 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1604
1629
1605 @unfilteredmethod
1630 @unfilteredmethod
1606 def _writejournal(self, desc):
1631 def _writejournal(self, desc):
1607 self.dirstate.savebackup(None, 'journal.dirstate')
1632 self.dirstate.savebackup(None, 'journal.dirstate')
1608 narrowspec.savebackup(self, 'journal.narrowspec')
1633 narrowspec.savebackup(self, 'journal.narrowspec')
1609 self.vfs.write("journal.branch",
1634 self.vfs.write("journal.branch",
1610 encoding.fromlocal(self.dirstate.branch()))
1635 encoding.fromlocal(self.dirstate.branch()))
1611 self.vfs.write("journal.desc",
1636 self.vfs.write("journal.desc",
1612 "%d\n%s\n" % (len(self), desc))
1637 "%d\n%s\n" % (len(self), desc))
1613 self.vfs.write("journal.bookmarks",
1638 self.vfs.write("journal.bookmarks",
1614 self.vfs.tryread("bookmarks"))
1639 self.vfs.tryread("bookmarks"))
1615 self.svfs.write("journal.phaseroots",
1640 self.svfs.write("journal.phaseroots",
1616 self.svfs.tryread("phaseroots"))
1641 self.svfs.tryread("phaseroots"))
1617
1642
1618 def recover(self):
1643 def recover(self):
1619 with self.lock():
1644 with self.lock():
1620 if self.svfs.exists("journal"):
1645 if self.svfs.exists("journal"):
1621 self.ui.status(_("rolling back interrupted transaction\n"))
1646 self.ui.status(_("rolling back interrupted transaction\n"))
1622 vfsmap = {'': self.svfs,
1647 vfsmap = {'': self.svfs,
1623 'plain': self.vfs,}
1648 'plain': self.vfs,}
1624 transaction.rollback(self.svfs, vfsmap, "journal",
1649 transaction.rollback(self.svfs, vfsmap, "journal",
1625 self.ui.warn,
1650 self.ui.warn,
1626 checkambigfiles=_cachedfiles)
1651 checkambigfiles=_cachedfiles)
1627 self.invalidate()
1652 self.invalidate()
1628 return True
1653 return True
1629 else:
1654 else:
1630 self.ui.warn(_("no interrupted transaction available\n"))
1655 self.ui.warn(_("no interrupted transaction available\n"))
1631 return False
1656 return False
1632
1657
1633 def rollback(self, dryrun=False, force=False):
1658 def rollback(self, dryrun=False, force=False):
1634 wlock = lock = dsguard = None
1659 wlock = lock = dsguard = None
1635 try:
1660 try:
1636 wlock = self.wlock()
1661 wlock = self.wlock()
1637 lock = self.lock()
1662 lock = self.lock()
1638 if self.svfs.exists("undo"):
1663 if self.svfs.exists("undo"):
1639 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1664 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1640
1665
1641 return self._rollback(dryrun, force, dsguard)
1666 return self._rollback(dryrun, force, dsguard)
1642 else:
1667 else:
1643 self.ui.warn(_("no rollback information available\n"))
1668 self.ui.warn(_("no rollback information available\n"))
1644 return 1
1669 return 1
1645 finally:
1670 finally:
1646 release(dsguard, lock, wlock)
1671 release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
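
    # A sketch of how an extension might augment this; the names defined
    # here are illustrative, only extensions.wrapfunction() is real API:
    #
    #   def wrapupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def extended(tr):
    #           updater(tr)
    #           warmextensioncache(repo, tr)  # hypothetical helper
    #       return extended
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', wrapupdater)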

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough
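
    # 'hg debugupdatecaches' is the caller that passes full=True; a normal
    # transaction close instead reaches this through the updater built by
    # _buildcacheupdater() above, passing only 'tr'.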

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l
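
    # ui.timeout and ui.timeout.warn are expressed in seconds; with
    # wait=False both stay 0, so lockmod.trylock() fails immediately on a
    # contended lock instead of blocking.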

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
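
    # Sketch of typical use (commit() below schedules its 'commit' hook
    # this way):
    #
    #   def callback():
    #       ...  # runs once the outermost lock is released
    #   repo._afterlock(callback)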

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
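
    # The documented acquisition order, as a sketch; lock objects are
    # context managers, so releases happen in reverse order:
    #
    #   with repo.wlock():      # working copy first
    #       with repo.lock():   # then the store
    #           ...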

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
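
    # Minimal usage sketch (assumes pending changes in the working
    # directory):
    #
    #   node = repo.commit(text='fix frobnication')
    #   if node is None:
    #       ...  # nothing to commit and empty commits were not allowed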

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
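
    # commit() above calls this with a workingcommitctx; callers that build
    # changesets without a working directory (the convert extension, for
    # instance) construct a context.memctx and pass it here directly.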

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

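            # walk first parents from top towards bottom, recording nodes at
            # exponentially growing distances (1, 2, 4, ...) so the reply to
            # the legacy 'between' wire protocol command stays small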
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions called before pushing
        changesets; each hook receives a pushop carrying repo, remote and
        outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
2485
2510
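    # A minimal sketch (hypothetical bookmark and nodes): moving a bookmark
    # through the pushkey protocol runs the prepushkey/pushkey hooks above:
    #
    #     ok = repo.pushkey(b'bookmarks', b'mybook',
    #                       hex(oldnode), hex(newnode))
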
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

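    # Typical namespaces (a sketch; the exact set depends on enabled
    # features):
    #
    #     repo.listkeys(b'namespaces')  # advertised pushkey namespaces
    #     repo.listkeys(b'bookmarks')   # {bookmark name: hex node}
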
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

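    # The message lands in .hg/last-message.txt so it can be recovered after
    # an aborted commit; the returned path is relative to the cwd. Sketch:
    #
    #     relpath = repo.savecommitmessage(b'draft commit message')
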
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

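# A hedged usage sketch: the transaction code uses the returned closure as a
# post-close callback so journal files become undo files:
#
#     after = aftertrans([(repo.svfs, b'journal', b'undo')])
#     after()  # renames journal -> undo, tolerating a missing journal
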
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

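# For example (hypothetical path):
#
#     undoname('/repo/.hg/store/journal.phaseroots')
#     -> '/repo/.hg/store/undo.phaseroots'
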
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

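# With stock settings (usestore, usefncache, dotencode and generaldelta all
# default to on), the result is typically:
#
#     {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
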
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

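# A hedged sketch of an extension claiming a custom creation option
# (hypothetical option name 'myopt'):
#
#     def _filtercreateopts(orig, ui, createopts):
#         unknown = orig(ui, createopts)
#         unknown.pop('myopt', None)
#         return unknown
#
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                             _filtercreateopts)
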
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

    # We create an invalid changelog outside the store so very old
    # Mercurial versions (which didn't know about the requirements
    # file) encounter an error on reading the changelog. This
    # effectively locks out old clients and prevents them from
    # mucking with a repo in an unknown format.
    #
    # The revlog header has version 2, which won't be recognized by
    # such old clients.
    hgvfs.append(b'00changelog.i',
                 b'\0\0\0\2 dummy changelog to prevent using the old repo '
                 b'layout')

    scmutil.writerequires(hgvfs, requirements)

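# After creation with default options the on-disk layout looks roughly like
# (simplified):
#
#     .hg/00changelog.i   # dummy changelog locking out pre-'requires' clients
#     .hg/requires        # dotencode, fncache, generaldelta, revlogv1, store
#     .hg/store/          # revlog data will be written here
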
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
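
# The class swap in action (sketch):
#
#     poisonrepository(repo)
#     repo.close()      # still allowed
#     repo.changelog    # raises error.ProgrammingError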