localrepo: use boolean in opener options...
Gregory Szorc
r39735:6f26417b default
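The change in this revision (file lines 821-822 below) swaps a `dict((r, 1) ...)` construction for a dict comprehension with `True` values, so the opener options read as boolean feature flags. A minimal sketch of the equivalence, using illustrative requirement values rather than any real repository's contents:

    # Both forms build equal dicts in Python (True == 1); the comprehension
    # simply states the boolean intent of the flag explicitly.
    requirements = {'revlogv1', 'generaldelta', 'shared'}       # illustrative
    openerreqs = {'revlogv1', 'generaldelta', 'treemanifest'}   # illustrative

    old = dict((r, 1) for r in requirements if r in openerreqs)
    new = {r: True for r in requirements if r in openerreqs}

    assert old == new == {'revlogv1': True, 'generaldelta': True}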
@@ -1,2714 +1,2714 b''
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 import errno
11 import hashlib
12 import os
13 import random
14 import sys
15 import time
16 import weakref
17
18 from .i18n import _
19 from .node import (
20     hex,
21     nullid,
22     short,
23 )
24 from . import (
25     bookmarks,
26     branchmap,
27     bundle2,
28     changegroup,
29     changelog,
30     color,
31     context,
32     dirstate,
33     dirstateguard,
34     discovery,
35     encoding,
36     error,
37     exchange,
38     extensions,
39     filelog,
40     hook,
41     lock as lockmod,
42     manifest,
43     match as matchmod,
44     merge as mergemod,
45     mergeutil,
46     namespaces,
47     narrowspec,
48     obsolete,
49     pathutil,
50     phases,
51     pushkey,
52     pycompat,
53     repository,
54     repoview,
55     revset,
56     revsetlang,
57     scmutil,
58     sparse,
59     store as storemod,
60     subrepoutil,
61     tags as tagsmod,
62     transaction,
63     txnutil,
64     util,
65     vfs as vfsmod,
66 )
67 from .utils import (
68     interfaceutil,
69     procutil,
70     stringutil,
71 )
72
73 from .revlogutils import (
74     constants as revlogconst,
75 )
76
77 release = lockmod.release
78 urlerr = util.urlerr
79 urlreq = util.urlreq
80
81 # set of (path, vfs-location) tuples. vfs-location is:
82 # - 'plain' for vfs relative paths
83 # - '' for svfs relative paths
84 _cachedfiles = set()
85
86 class _basefilecache(scmutil.filecache):
87     """All filecache usage on repo is done for logic that should be unfiltered
88     """
89     def __get__(self, repo, type=None):
90         if repo is None:
91             return self
92         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93     def __set__(self, repo, value):
94         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95     def __delete__(self, repo):
96         return super(_basefilecache, self).__delete__(repo.unfiltered())
97
98 class repofilecache(_basefilecache):
99     """filecache for files in .hg but outside of .hg/store"""
100     def __init__(self, *paths):
101         super(repofilecache, self).__init__(*paths)
102         for path in paths:
103             _cachedfiles.add((path, 'plain'))
104
105     def join(self, obj, fname):
106         return obj.vfs.join(fname)
107
108 class storecache(_basefilecache):
109     """filecache for files in the store"""
110     def __init__(self, *paths):
111         super(storecache, self).__init__(*paths)
112         for path in paths:
113             _cachedfiles.add((path, ''))
114
115     def join(self, obj, fname):
116         return obj.sjoin(fname)
117
118 def isfilecached(repo, name):
119     """check if a repo has already cached the "name" filecache-ed property
120
121     This returns a (cachedobj-or-None, iscached) tuple.
122     """
123     cacheentry = repo.unfiltered()._filecache.get(name, None)
124     if not cacheentry:
125         return None, False
126     return cacheentry.obj, True
127
128 class unfilteredpropertycache(util.propertycache):
129     """propertycache that applies to the unfiltered repo only"""
130
131     def __get__(self, repo, type=None):
132         unfi = repo.unfiltered()
133         if unfi is repo:
134             return super(unfilteredpropertycache, self).__get__(unfi)
135         return getattr(unfi, self.name)
136
137 class filteredpropertycache(util.propertycache):
138     """propertycache that must take filtering into account"""
139
140     def cachevalue(self, obj, value):
141         object.__setattr__(obj, self.name, value)
142
143
144 def hasunfilteredcache(repo, name):
145     """check if a repo has an unfilteredpropertycache value for <name>"""
146     return name in vars(repo.unfiltered())
147
148 def unfilteredmethod(orig):
149     """decorate a method that always needs to be run on the unfiltered version"""
150     def wrapper(repo, *args, **kwargs):
151         return orig(repo.unfiltered(), *args, **kwargs)
152     return wrapper
153
154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155               'unbundle'}
156 legacycaps = moderncaps.union({'changegroupsubset'})
157
158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 class localcommandexecutor(object):
160     def __init__(self, peer):
161         self._peer = peer
162         self._sent = False
163         self._closed = False
164
165     def __enter__(self):
166         return self
167
168     def __exit__(self, exctype, excvalue, exctb):
169         self.close()
170
171     def callcommand(self, command, args):
172         if self._sent:
173             raise error.ProgrammingError('callcommand() cannot be used after '
174                                          'sendcommands()')
175
176         if self._closed:
177             raise error.ProgrammingError('callcommand() cannot be used after '
178                                          'close()')
179
180         # We don't need to support anything fancy. Just call the named
181         # method on the peer and return a resolved future.
182         fn = getattr(self._peer, pycompat.sysstr(command))
183
184         f = pycompat.futures.Future()
185
186         try:
187             result = fn(**pycompat.strkwargs(args))
188         except Exception:
189             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190         else:
191             f.set_result(result)
192
193         return f
194
195     def sendcommands(self):
196         self._sent = True
197
198     def close(self):
199         self._closed = True
200
201 @interfaceutil.implementer(repository.ipeercommands)
202 class localpeer(repository.peer):
203     '''peer for a local repo; reflects only the most recent API'''
204
205     def __init__(self, repo, caps=None):
206         super(localpeer, self).__init__()
207
208         if caps is None:
209             caps = moderncaps.copy()
210         self._repo = repo.filtered('served')
211         self.ui = repo.ui
212         self._caps = repo._restrictcapabilities(caps)
213
214     # Begin of _basepeer interface.
215
216     def url(self):
217         return self._repo.url()
218
219     def local(self):
220         return self._repo
221
222     def peer(self):
223         return self
224
225     def canpush(self):
226         return True
227
228     def close(self):
229         self._repo.close()
230
231     # End of _basepeer interface.
232
233     # Begin of _basewirecommands interface.
234
235     def branchmap(self):
236         return self._repo.branchmap()
237
238     def capabilities(self):
239         return self._caps
240
241     def clonebundles(self):
242         return self._repo.tryread('clonebundles.manifest')
243
244     def debugwireargs(self, one, two, three=None, four=None, five=None):
245         """Used to test argument passing over the wire"""
246         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247                                    pycompat.bytestr(four),
248                                    pycompat.bytestr(five))
249
250     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251                   **kwargs):
252         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253                                           common=common, bundlecaps=bundlecaps,
254                                           **kwargs)[1]
255         cb = util.chunkbuffer(chunks)
256
257         if exchange.bundle2requested(bundlecaps):
258             # When requesting a bundle2, getbundle returns a stream to make the
259             # wire level function happier. We need to build a proper object
260             # from it in local peer.
261             return bundle2.getunbundler(self.ui, cb)
262         else:
263             return changegroup.getunbundler('01', cb, None)
264
265     def heads(self):
266         return self._repo.heads()
267
268     def known(self, nodes):
269         return self._repo.known(nodes)
270
271     def listkeys(self, namespace):
272         return self._repo.listkeys(namespace)
273
274     def lookup(self, key):
275         return self._repo.lookup(key)
276
277     def pushkey(self, namespace, key, old, new):
278         return self._repo.pushkey(namespace, key, old, new)
279
280     def stream_out(self):
281         raise error.Abort(_('cannot perform stream clone against local '
282                             'peer'))
283
284     def unbundle(self, bundle, heads, url):
285         """apply a bundle on a repo
286
287         This function handles the repo locking itself."""
288         try:
289             try:
290                 bundle = exchange.readbundle(self.ui, bundle, None)
291                 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292                 if util.safehasattr(ret, 'getchunks'):
293                     # This is a bundle20 object, turn it into an unbundler.
294                     # This little dance should be dropped eventually when the
295                     # API is finally improved.
296                     stream = util.chunkbuffer(ret.getchunks())
297                     ret = bundle2.getunbundler(self.ui, stream)
298                 return ret
299             except Exception as exc:
300                 # If the exception contains output salvaged from a bundle2
301                 # reply, we need to make sure it is printed before continuing
302                 # to fail. So we build a bundle2 with such output and consume
303                 # it directly.
304                 #
305                 # This is not very elegant but allows a "simple" solution for
306                 # issue4594
307                 output = getattr(exc, '_bundle2salvagedoutput', ())
308                 if output:
309                     bundler = bundle2.bundle20(self._repo.ui)
310                     for out in output:
311                         bundler.addpart(out)
312                     stream = util.chunkbuffer(bundler.getchunks())
313                     b = bundle2.getunbundler(self.ui, stream)
314                     bundle2.processbundle(self._repo, b)
315                 raise
316         except error.PushRaced as exc:
317             raise error.ResponseError(_('push failed:'),
318                                       stringutil.forcebytestr(exc))
319
320     # End of _basewirecommands interface.
321
322     # Begin of peer interface.
323
324     def commandexecutor(self):
325         return localcommandexecutor(self)
326
327     # End of peer interface.
328
329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 class locallegacypeer(localpeer):
331     '''peer extension which implements legacy methods too; used for tests with
332     restricted capabilities'''
333
334     def __init__(self, repo):
335         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336
337     # Begin of baselegacywirecommands interface.
338
339     def between(self, pairs):
340         return self._repo.between(pairs)
341
342     def branches(self, nodes):
343         return self._repo.branches(nodes)
344
345     def changegroup(self, nodes, source):
346         outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347                                       missingheads=self._repo.heads())
348         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349
350     def changegroupsubset(self, bases, heads, source):
351         outgoing = discovery.outgoing(self._repo, missingroots=bases,
352                                       missingheads=heads)
353         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354
355     # End of baselegacywirecommands interface.
356
357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 # clients.
359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360
361 # A repository with the sparserevlog feature will have delta chains that
362 # can spread over a larger span. Sparse reading cuts these large spans into
363 # pieces, so that each piece isn't too big.
364 # Without the sparserevlog capability, reading from the repository could use
365 # huge amounts of memory, because the whole span would be read at once,
366 # including all the intermediate revisions that aren't pertinent for the chain.
367 # This is why once a repository has enabled sparse-read, it becomes required.
368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369
370 # Functions receiving (ui, features) that extensions can register to impact
371 # the ability to load repositories with custom requirements. Only
372 # functions defined in loaded extensions are called.
373 #
374 # The function receives a set of requirement strings that the repository
375 # is capable of opening. Functions will typically add elements to the
376 # set to reflect that the extension knows how to handle those requirements.
377 featuresetupfuncs = set()
378
379 def makelocalrepository(baseui, path, intents=None):
380     """Create a local repository object.
381
382     Given arguments needed to construct a local repository, this function
383     derives a type suitable for representing that repository and returns an
384     instance of it.
385
386     The returned object conforms to the ``repository.completelocalrepository``
387     interface.
388     """
389     ui = baseui.copy()
390     # Prevent copying repo configuration.
391     ui.copy = baseui.copy
392
393     # Working directory VFS rooted at repository root.
394     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
395
396     # Main VFS for .hg/ directory.
397     hgpath = wdirvfs.join(b'.hg')
398     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
399
400     # The .hg/ path should exist and should be a directory. All other
401     # cases are errors.
402     if not hgvfs.isdir():
403         try:
404             hgvfs.stat()
405         except OSError as e:
406             if e.errno != errno.ENOENT:
407                 raise
408
409         raise error.RepoError(_(b'repository %s not found') % path)
410
411     # The .hg/requires file contains a newline-delimited list of
412     # features/capabilities the opener (us) must have in order to use
413     # the repository. This file was introduced in Mercurial 0.9.2,
414     # which means very old repositories may not have one. We assume
415     # a missing file translates to no requirements.
416     try:
417         requirements = set(hgvfs.read(b'requires').splitlines())
418     except IOError as e:
419         if e.errno != errno.ENOENT:
420             raise
421         requirements = set()
422
423     # The .hg/hgrc file may load extensions or contain config options
424     # that influence repository construction. Attempt to load it and
425     # process any new extensions that it may have pulled in.
426     try:
427         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
428     except IOError:
429         pass
430     else:
431         extensions.loadall(ui)
432
433     supportedrequirements = gathersupportedrequirements(ui)
434
435     # We first validate the requirements are known.
436     ensurerequirementsrecognized(requirements, supportedrequirements)
437
438     # Then we validate that the known set is reasonable to use together.
439     ensurerequirementscompatible(ui, requirements)
440
441     # TODO there are unhandled edge cases related to opening repositories with
442     # shared storage. If storage is shared, we should also test for requirements
443     # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
444     # that repo, as that repo may load extensions needed to open it. This is a
445     # bit complicated because we don't want the other hgrc to overwrite settings
446     # in this hgrc.
447     #
448     # This bug is somewhat mitigated by the fact that we copy the .hg/requires
449     # file when sharing repos. But if a requirement is added after the share is
450     # performed, thereby introducing a new requirement for the opener, we
451     # will not see that and could encounter a run-time error interacting with
452     # that shared store since it has an unknown-to-us requirement.
453
454     # At this point, we know we should be capable of opening the repository.
455     # Now get on with doing that.
456
457     # The "store" part of the repository holds versioned data. How it is
458     # accessed is determined by various requirements. The ``shared`` or
459     # ``relshared`` requirements indicate the store lives in the path contained
460     # in the ``.hg/sharedpath`` file. This is an absolute path for
461     # ``shared`` and relative to ``.hg/`` for ``relshared``.
462     if b'shared' in requirements or b'relshared' in requirements:
463         sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
464         if b'relshared' in requirements:
465             sharedpath = hgvfs.join(sharedpath)
466
467         sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
468
469         if not sharedvfs.exists():
470             raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
471                                     b'directory %s') % sharedvfs.base)
472
473         storebasepath = sharedvfs.base
474         cachepath = sharedvfs.join(b'cache')
475     else:
476         storebasepath = hgvfs.base
477         cachepath = hgvfs.join(b'cache')
478
479     # The store has changed over time and the exact layout is dictated by
480     # requirements. The store interface abstracts differences across all
481     # of them.
482     store = makestore(requirements, storebasepath,
483                       lambda base: vfsmod.vfs(base, cacheaudited=True))
484
485     hgvfs.createmode = store.createmode
486
487     # The cache vfs is used to manage cache files.
488     cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
489     cachevfs.createmode = store.createmode
490
491     return localrepository(
492         baseui=baseui,
493         ui=ui,
494         origroot=path,
495         wdirvfs=wdirvfs,
496         hgvfs=hgvfs,
497         requirements=requirements,
498         supportedrequirements=supportedrequirements,
499         sharedpath=storebasepath,
500         store=store,
501         cachevfs=cachevfs,
502         intents=intents)
503
504 def gathersupportedrequirements(ui):
505     """Determine the complete set of recognized requirements."""
506     # Start with all requirements supported by this file.
507     supported = set(localrepository._basesupported)
508
509     # Execute ``featuresetupfuncs`` entries if they belong to an extension
510     # relevant to this ui instance.
511     modules = {m.__name__ for n, m in extensions.extensions(ui)}
512
513     for fn in featuresetupfuncs:
514         if fn.__module__ in modules:
515             fn(ui, supported)
516
517     # Add derived requirements from registered compression engines.
518     for name in util.compengines:
519         engine = util.compengines[name]
520         if engine.revlogheader():
521             supported.add(b'exp-compression-%s' % name)
522
523     return supported
524
525 def ensurerequirementsrecognized(requirements, supported):
526     """Validate that a set of local requirements is recognized.
527
528     Receives a set of requirements. Raises an ``error.RepoError`` if there
529     exists any requirement in that set that currently loaded code doesn't
530     recognize.
531
532     Returns a set of supported requirements.
533     """
534     missing = set()
535
536     for requirement in requirements:
537         if requirement in supported:
538             continue
539
540         if not requirement or not requirement[0:1].isalnum():
541             raise error.RequirementError(_(b'.hg/requires file is corrupt'))
542
543         missing.add(requirement)
544
545     if missing:
546         raise error.RequirementError(
547             _(b'repository requires features unknown to this Mercurial: %s') %
548             b' '.join(sorted(missing)),
549             hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
550                    b'for more information'))
551
552 def ensurerequirementscompatible(ui, requirements):
553     """Validates that a set of recognized requirements is mutually compatible.
554
555     Some requirements may not be compatible with others or require
556     config options that aren't enabled. This function is called during
557     repository opening to ensure that the set of requirements needed
558     to open a repository is sane and compatible with config options.
559
560     Extensions can monkeypatch this function to perform additional
561     checking.
562
563     ``error.RepoError`` should be raised on failure.
564     """
565     if b'exp-sparse' in requirements and not sparse.enabled:
566         raise error.RepoError(_(b'repository is using sparse feature but '
567                                 b'sparse is not enabled; enable the '
568                                 b'"sparse" extension to access'))
569
570 def makestore(requirements, path, vfstype):
571     """Construct a storage object for a repository."""
572     if b'store' in requirements:
573         if b'fncache' in requirements:
574             return storemod.fncachestore(path, vfstype,
575                                          b'dotencode' in requirements)
576
577         return storemod.encodedstore(path, vfstype)
578
579     return storemod.basicstore(path, vfstype)
580
581 @interfaceutil.implementer(repository.completelocalrepository)
582 class localrepository(object):
583
584     # obsolete experimental requirements:
585     # - manifestv2: An experimental new manifest format that allowed
586     #   for stem compression of long paths. Experiment ended up not
587     #   being successful (repository sizes went up due to worse delta
588     #   chains), and the code was deleted in 4.6.
589     supportedformats = {
590         'revlogv1',
591         'generaldelta',
592         'treemanifest',
593         REVLOGV2_REQUIREMENT,
594         SPARSEREVLOG_REQUIREMENT,
595     }
596     _basesupported = supportedformats | {
597         'store',
598         'fncache',
599         'shared',
600         'relshared',
601         'dotencode',
602         'exp-sparse',
603         'internal-phase'
604     }
605     openerreqs = {
606         'revlogv1',
607         'generaldelta',
608         'treemanifest',
609     }
610
611     # list of prefixes for files which can be written without 'wlock'
612     # Extensions should extend this list when needed
613     _wlockfreeprefix = {
614         # We might consider requiring 'wlock' for the next
615         # two, but pretty much all the existing code assumes
616         # wlock is not needed so we keep them excluded for
617         # now.
618         'hgrc',
619         'requires',
620         # XXX cache is a complicated business; someone
621         # should investigate this in depth at some point
622         'cache/',
623         # XXX shouldn't the dirstate be covered by the wlock?
624         'dirstate',
625         # XXX bisect was still a bit too messy at the time
626         # this changeset was introduced. Someone should fix
627         # the remaining bit and drop this line
628         'bisect.state',
629     }
630
631     def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
632                  supportedrequirements, sharedpath, store, cachevfs,
633                  intents=None):
634         """Create a new local repository instance.
635
636         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
637         or ``localrepo.makelocalrepository()`` for obtaining a new repository
638         object.
639
640         Arguments:
641
642         baseui
643            ``ui.ui`` instance that the ``ui`` argument was based off of.
644
645         ui
646            ``ui.ui`` instance for use by the repository.
647
648         origroot
649            ``bytes`` path to working directory root of this repository.
650
651         wdirvfs
652            ``vfs.vfs`` rooted at the working directory.
653
654         hgvfs
655            ``vfs.vfs`` rooted at .hg/
656
657         requirements
658            ``set`` of bytestrings representing repository opening requirements.
659
660         supportedrequirements
661            ``set`` of bytestrings representing repository requirements that we
662            know how to open. May be a superset of ``requirements``.
663
664         sharedpath
665            ``bytes`` path defining the storage base directory. Points to a
666            ``.hg/`` directory somewhere.
667
668         store
669            ``store.basicstore`` (or derived) instance providing access to
670            versioned storage.
671
672         cachevfs
673            ``vfs.vfs`` used for cache files.
674
675         intents
676            ``set`` of system strings indicating what this repo will be used
677            for.
678         """
679         self.baseui = baseui
680         self.ui = ui
681         self.origroot = origroot
682         # vfs rooted at working directory.
683         self.wvfs = wdirvfs
684         self.root = wdirvfs.base
685         # vfs rooted at .hg/. Used to access most non-store paths.
686         self.vfs = hgvfs
687         self.path = hgvfs.base
688         self.requirements = requirements
689         self.supported = supportedrequirements
690         self.sharedpath = sharedpath
691         self.store = store
692         self.cachevfs = cachevfs
693
694         self.filtername = None
695
696         if (self.ui.configbool('devel', 'all-warnings') or
697             self.ui.configbool('devel', 'check-locks')):
698             self.vfs.audit = self._getvfsward(self.vfs.audit)
699         # A list of callbacks to shape the phase if no data were found.
700         # Callbacks are in the form: func(repo, roots) --> processed root.
701         # This list is to be filled by extensions during repo setup.
702         self._phasedefaults = []
703
704         color.setup(self.ui)
705
706         self.spath = self.store.path
707         self.svfs = self.store.vfs
708         self.sjoin = self.store.join
709         if (self.ui.configbool('devel', 'all-warnings') or
710             self.ui.configbool('devel', 'check-locks')):
711             if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
712                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
713             else: # standard vfs
714                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
715         self._applyopenerreqs()
716
717         self._dirstatevalidatewarned = False
718
719         self._branchcaches = {}
720         self._revbranchcache = None
721         self._filterpats = {}
722         self._datafilters = {}
723         self._transref = self._lockref = self._wlockref = None
724
725         # A cache for various files under .hg/ that tracks file changes
726         # (used by the filecache decorator)
727         #
728         # Maps a property name to its util.filecacheentry
729         self._filecache = {}
730
731         # hold sets of revisions to be filtered
732         # should be cleared when something might have changed the filter value:
733         # - new changesets,
734         # - phase change,
735         # - new obsolescence marker,
736         # - working directory parent change,
737         # - bookmark changes
738         self.filteredrevcache = {}
739
740         # post-dirstate-status hooks
741         self._postdsstatus = []
742
743         # generic mapping between names and nodes
744         self.names = namespaces.namespaces()
745
746         # Key to signature value.
747         self._sparsesignaturecache = {}
748         # Signature to cached matcher instance.
749         self._sparsematchercache = {}
750
751     def _getvfsward(self, origfunc):
752         """build a ward for self.vfs"""
753         rref = weakref.ref(self)
754         def checkvfs(path, mode=None):
755             ret = origfunc(path, mode=mode)
756             repo = rref()
757             if (repo is None
758                 or not util.safehasattr(repo, '_wlockref')
759                 or not util.safehasattr(repo, '_lockref')):
760                 return
761             if mode in (None, 'r', 'rb'):
762                 return
763             if path.startswith(repo.path):
764                 # truncate name relative to the repository (.hg)
765                 path = path[len(repo.path) + 1:]
766             if path.startswith('cache/'):
767                 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
768                 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
769             if path.startswith('journal.'):
770                 # journal is covered by 'lock'
771                 if repo._currentlock(repo._lockref) is None:
772                     repo.ui.develwarn('write with no lock: "%s"' % path,
773                                       stacklevel=2, config='check-locks')
774             elif repo._currentlock(repo._wlockref) is None:
775                 # rest of vfs files are covered by 'wlock'
776                 #
777                 # exclude special files
778                 for prefix in self._wlockfreeprefix:
779                     if path.startswith(prefix):
780                         return
781                 repo.ui.develwarn('write with no wlock: "%s"' % path,
782                                   stacklevel=2, config='check-locks')
783             return ret
784         return checkvfs
785
786     def _getsvfsward(self, origfunc):
787         """build a ward for self.svfs"""
788         rref = weakref.ref(self)
789         def checksvfs(path, mode=None):
790             ret = origfunc(path, mode=mode)
791             repo = rref()
792             if repo is None or not util.safehasattr(repo, '_lockref'):
793                 return
794             if mode in (None, 'r', 'rb'):
795                 return
796             if path.startswith(repo.sharedpath):
797                 # truncate name relative to the repository (.hg)
798                 path = path[len(repo.sharedpath) + 1:]
799             if repo._currentlock(repo._lockref) is None:
800                 repo.ui.develwarn('write with no lock: "%s"' % path,
801                                   stacklevel=3)
802             return ret
803         return checksvfs
804
805     def close(self):
806         self._writecaches()
807
808     def _writecaches(self):
809         if self._revbranchcache:
810             self._revbranchcache.write()
811
812     def _restrictcapabilities(self, caps):
813         if self.ui.configbool('experimental', 'bundle2-advertise'):
814             caps = set(caps)
815             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
816                                                               role='client'))
817             caps.add('bundle2=' + urlreq.quote(capsblob))
818         return caps
819
820     def _applyopenerreqs(self):
821 -        self.svfs.options = dict((r, 1) for r in self.requirements
822 -                                 if r in self.openerreqs)
821 +        self.svfs.options = {r: True for r in self.requirements
822 +                             if r in self.openerreqs}
823 # experimental config: format.chunkcachesize
823 # experimental config: format.chunkcachesize
824 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
824 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
825 if chunkcachesize is not None:
825 if chunkcachesize is not None:
826 self.svfs.options['chunkcachesize'] = chunkcachesize
826 self.svfs.options['chunkcachesize'] = chunkcachesize
827 # experimental config: format.manifestcachesize
827 # experimental config: format.manifestcachesize
828 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
828 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
829 if manifestcachesize is not None:
829 if manifestcachesize is not None:
830 self.svfs.options['manifestcachesize'] = manifestcachesize
        self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
                                              'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True
        maxchainlen = None
        if sparserevlog:
            maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
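        # For illustration only: the option names consumed above are driven
        # by user config such as the following hgrc snippet (the values here
        # are examples, not defaults):
        #
        #   [storage]
        #   revlog.optimize-delta-parent-choice = yes
        #   [format]
        #   maxchainlen = 4000
        #   [experimental]
        #   maxdeltachainspan = 16MB
        #   sparse-read = yes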

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected it by mistake since
        # it panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)
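    # Illustrative sketch of using the extension point above (hypothetical
    # extension code, not part of this module):
    #
    #   def reposetup(ui, repo):
    #       class observedrepo(repo.__class__):
    #           def _makedirstate(self):
    #               ds = super(observedrepo, self)._makedirstate()
    #               # inspect or decorate the dirstate here
    #               return ds
    #       repo.__class__ = observedrepo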

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
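    # Illustrative lookups through __getitem__ (assuming a repo object):
    # repo[None] yields the working directory context, repo['tip'] a
    # changectx, and repo[0:2] a list of changectx objects with filtered
    # revisions skipped.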

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
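    # Illustrative usage (assuming a repo object and a binary node ``node``):
    #
    #   for rev in repo.revs('ancestors(%n) and not public()', node):
    #       ...
    #
    # where %n is one of the escapes documented in ``revsetlang.formatspec``.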

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
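    # The filter machinery above is driven by the [encode] and [decode]
    # config sections (see _encodefilterpats/_decodefilterpats below). An
    # illustrative hgrc entry (pattern and command are examples only):
    #
    #   [encode]
    #   *.txt = tr -d '\r'
    #
    # pipes matching files through the given shell command when their
    # working directory contents are read via wread().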

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do this only once,
                # building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes, if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
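    # Illustrative hook wiring (the script path is an example only): the
    # pretxnclose/txnclose hooks scheduled above can be enabled with e.g.
    #
    #   [hooks]
    #   pretxnclose.check-heads = /path/to/check-script
    #
    # Hook arguments such as txnname and txnid are exposed to shell hooks
    # as HG_TXNNAME and HG_TXNID environment variables.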

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1683
1683
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

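    # Illustrative sketch (an addition, not original source): an extension
    # maintaining its own cache could wrap the method above; the helper names
    # below are hypothetical:
    #
    #     def wrapcacheupdater(orig, repo, newtransaction):
    #         updater = orig(repo, newtransaction)
    #         def wrapped(tr):
    #             updater(tr)
    #             refreshmycache(repo, tr)  # hypothetical extension cache
    #         return wrapped
    #     extensions.wrapfunction(localrepo.localrepository,
    #                             '_buildcacheupdater', wrapcacheupdater)
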
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction is closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

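    # Illustrative usage sketch (an addition, not original source): a caller
    # wanting every known cache warm, in the spirit of `hg debugupdatecaches`,
    # might run:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
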
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

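    # For example (an added note, not original source), commit() below defers
    # its 'commit' hook with:
    #
    #     self._afterlock(commithook)
    #
    # so the hook only fires once the outermost lock is released.
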
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

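    # Illustrative usage sketch (an addition, not original source; the
    # transaction name is hypothetical): callers needing both locks take them
    # in the documented order, e.g.:
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction('example') as tr:
    #             pass  # mutate the store here
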
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

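    # Illustrative usage sketch (an addition, not original source; the
    # message, user and pattern are hypothetical):
    #
    #     node = repo.commit(text='fix the widget', user='alice',
    #                        match=scmutil.match(repo[None], ['src/foo.py']))
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
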
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets. If a parent has a higher phase, the resulting
                # phase will be compliant anyway.
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

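    # Illustrative sketch (an addition, not original source; names and
    # signatures are from memory and may vary by release): in-memory commits
    # typically reach commitctx() via context.memctx, e.g.:
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, memctx, path, 'data\n')
    #     mctx = context.memctx(repo, (repo['.'].node(), None), 'message',
    #                           ['a.txt'], getfilectx)
    #     node = repo.commitctx(mctx)
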
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

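    # Illustrative usage sketch (an addition, not original source): the
    # returned status object exposes lists such as modified, added, removed,
    # deleted, unknown, ignored and clean, e.g.:
    #
    #     st = repo.status(ignored=True, unknown=True)
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)
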
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

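    # Illustrative sketch (an addition, not original source): an extension
    # could register a fixup callback before each status run, e.g.:
    #
    #     def fixup(wctx, status):  # hypothetical callback
    #         wctx.repo().ui.debug('%d files modified\n'
    #                              % len(status.modified))
    #     repo.addpostdsstatus(fixup)
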
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

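    # Illustrative usage sketch (an addition, not original source):
    #
    #     for node in repo.branchheads('default', closed=True):
    #         repo.ui.write('%s\n' % short(node))
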
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

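    # Sampling sketch (editor's illustration): for each (top, bottom)
    # pair the loop walks first parents from ``top`` and records the
    # ancestors at distances 1, 2, 4, 8, ... - the exponential spacing
    # keeps the sample size logarithmic in chain length, which suits the
    # legacy wire-protocol discovery that calls this. For a linear
    # history of revs 0..10 with top at rev 10 and bottom at rev 0:
    #
    #   repo.between([(node10, node0)])  # -> [[rev9, rev8, rev6, rev2]]
    #
    # (node10/node0 stand for the corresponding node ids.)
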
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a
        pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

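    # Extension sketch (hypothetical hook, editor's illustration): abort
    # pushes that would send more than ten changesets. util.hooks.add()
    # registers the callable; it is invoked with the pushop before
    # changesets go out:
    #
    #   def limitoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 10:
    #           raise error.Abort(_('refusing to push %d changesets')
    #                             % len(pushop.outgoing.missing))
    #   repo.prepushoutgoinghooks.add('limitpush', limitoutgoing)
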
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

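    # Usage sketch (editor's illustration): moving a bookmark through
    # the pushkey protocol; ``oldhex``/``newhex`` are hypothetical hex
    # node ids. The return value is the namespace handler's result, or
    # False when a prepushkey hook aborted the operation:
    #
    #   repo.pushkey('bookmarks', 'feature-x', oldhex, newhex)
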
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

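    # Usage sketch (editor's illustration): commit code stashes the
    # message so an aborted commit can be retried with
    # ``hg commit --logfile .hg/last-message.txt``; the return value is
    # the path relative to the cwd, as produced by pathto():
    #
    #   msgfn = repo.savecommitmessage(b'my commit message')
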
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

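# Usage sketch (editor's illustration, names hypothetical): transaction
# setup builds (vfs, src, dest) tuples and installs the returned closure
# as the transaction's post-close callback, turning journal files into
# undo files on success:
#
#   renames = [(repo.svfs, 'journal', 'undo'),
#              (repo.vfs, 'journal.dirstate', 'undo.dirstate')]
#   after = aftertrans(renames)
#   after()  # unlinks each dest, then renames src -> dest
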
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

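# Mapping sketch (editor's illustration): only the leading 'journal' in
# the basename is replaced:
#
#   undoname('.hg/store/journal')             # -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
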
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

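# Result sketch (editor's illustration): with stock settings (usestore,
# usefncache, dotencode and generaldelta enabled, zlib compression) this
# typically returns:
#
#   newreporequirements(ui)
#   # -> {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
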
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

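# Filtering sketch (editor's illustration): known options are consumed,
# everything else bounces back to the caller, which then refuses to
# create the repository:
#
#   filterknowncreateopts(ui, {'narrowfiles': True, 'bogus': 42})
#   # -> {'bogus': 42}
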
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

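# Usage sketch (editor's illustration, path hypothetical): after
# creation, .hg/requires lists the requirements one per line and
# .hg/00changelog.i carries the dummy header that locks out
# pre-requirements clients:
#
#   createrepository(ui, b'/tmp/newrepo')
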
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
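
# Behaviour sketch (editor's illustration): once poisoned, close()
# remains a harmless no-op but any other attribute access fails loudly:
#
#   poisonrepository(repo)
#   repo.close()      # allowed
#   repo.changelog    # raises error.ProgrammingError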