localrepo: check for .hg/ directory in makelocalrepository()...
Gregory Szorc
r39727:2f067e36 default
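In short: the "does .hg/ exist?" check moves out of localrepository.__init__() and up into makelocalrepository(), so a missing repository is reported before any repository object is constructed, and __init__() now reads .hg/requires directly, treating a missing file (ENOENT) as an empty requirements set. As a rough sketch of the resulting behaviour only (not part of the changeset; the ui handle and the bytes path below are illustrative assumptions):

    # Hypothetical caller, assuming mercurial is importable and that
    # repository paths are bytes, per Mercurial convention.
    from mercurial import error, localrepo, ui as uimod

    def tryopen(path):
        # After this change, a directory without .hg/ fails here, inside
        # makelocalrepository(), with "repository ... not found".
        try:
            return localrepo.makelocalrepository(uimod.ui.load(), path)
        except error.RepoError as inst:
            print('cannot open: %s' % inst)
            return None

    tryopen(b'/tmp/not-a-repository')

With the existence check hoisted, the .hg/hgrc read that follows it runs only against an existing .hg/ directory, and __init__() keeps only the IOError handling it actually needs for a valid repository that happens to have no requires file.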
@@ -1,2589 +1,2591 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )
 
 from .revlogutils import (
     constants as revlogconst,
 )
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(_basefilecache, self).__delete__(repo.unfiltered())
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property
 
     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True
 
 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
 
     def __enter__(self):
         return self
 
     def __exit__(self, exctype, excvalue, exctb):
         self.close()
 
     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'sendcommands()')
 
         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'close()')
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))
 
         f = pycompat.futures.Future()
 
         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)
 
         return f
 
     def sendcommands(self):
         self._sent = True
 
     def close(self):
         self._closed = True
 
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
 
     def url(self):
         return self._repo.url()
 
     def local(self):
         return self._repo
 
     def peer(self):
         return self
 
     def canpush(self):
         return True
 
     def close(self):
         self._repo.close()
 
     # End of _basepeer interface.
 
     # Begin of _basewirecommands interface.
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def capabilities(self):
         return self._caps
 
     def clonebundles(self):
         return self._repo.tryread('clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                    pycompat.bytestr(four),
                                    pycompat.bytestr(five))
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
         raise error.Abort(_('cannot perform stream clone against local '
                             'peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'),
                                       stringutil.forcebytestr(exc))
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def commandexecutor(self):
         return localcommandexecutor(self)
 
     # End of peer interface.
 
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                       missingheads=self._repo.heads())
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                       missingheads=heads)
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     # End of baselegacywirecommands interface.
 
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
 # pieces, so that each piece isn't too big.
 # Without the sparserevlog capability, reading from the repository could use
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()
 
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
     Given arguments needed to construct a local repository, this function
     derives a type suitable for representing that repository and returns an
     instance of it.
 
     The returned object conforms to the ``repository.completelocalrepository``
     interface.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy
 
     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
 
     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
 
+    # The .hg/ path should exist and should be a directory. All other
+    # cases are errors.
+    if not hgvfs.isdir():
+        try:
+            hgvfs.stat()
+        except OSError as e:
+            if e.errno != errno.ENOENT:
+                raise
+
+        raise error.RepoError(_(b'repository %s not found') % path)
+
     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
     except IOError:
         pass
     else:
         extensions.loadall(ui)
 
     return localrepository(
         baseui=baseui,
         ui=ui,
         origroot=path,
         wdirvfs=wdirvfs,
         hgvfs=hgvfs,
         intents=intents)
 
 @interfaceutil.implementer(repository.completelocalrepository)
 class localrepository(object):
 
     # obsolete experimental requirements:
     # - manifestv2: An experimental new manifest format that allowed
     #   for stem compression of long paths. Experiment ended up not
     #   being successful (repository sizes went up due to worse delta
     #   chains), and the code was deleted in 4.6.
     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         REVLOGV2_REQUIREMENT,
         SPARSEREVLOG_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
         'internal-phase'
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
     }
 
     # list of prefix for file which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We migh consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assume
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicatged business someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't be dirstate covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remainig bit and drop this line
         'bisect.state',
     }
 
     def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, intents=None):
         """Create a new local repository instance.
 
         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
         or ``localrepo.makelocalrepository()`` for obtaining a new repository
         object.
 
         Arguments:
 
         baseui
            ``ui.ui`` instance that ``ui`` argument was based off of.
 
         ui
            ``ui.ui`` instance for use by the repository.
 
         origroot
            ``bytes`` path to working directory root of this repository.
 
         wdirvfs
            ``vfs.vfs`` rooted at the working directory.
 
         hgvfs
            ``vfs.vfs`` rooted at .hg/
 
         intents
            ``set`` of system strings indicating what this repo will be used
            for.
         """
         self.baseui = baseui
         self.ui = ui
         self.origroot = origroot
         # vfs rooted at working directory.
         self.wvfs = wdirvfs
         self.root = wdirvfs.base
         # vfs rooted at .hg/. Used to access most non-store paths.
         self.vfs = hgvfs
         self.path = hgvfs.base
 
-        self.requirements = set()
         self.filtername = None
         # svfs: usually rooted at .hg/store, used to access repository history
         # If this is a shared repository, this vfs may point to another
         # repository's .hg/store directory.
         self.svfs = None
 
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
 
         if featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
         color.setup(self.ui)
 
         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)
 
-        if not self.vfs.isdir():
-            try:
-                self.vfs.stat()
-            except OSError as inst:
-                if inst.errno != errno.ENOENT:
-                    raise
-            raise error.RepoError(_("repository %s not found") % origroot)
-        else:
-            try:
-                self.requirements = scmutil.readrequires(
-                    self.vfs, self.supported)
-            except IOError as inst:
-                if inst.errno != errno.ENOENT:
-                    raise
+        try:
+            self.requirements = scmutil.readrequires(self.vfs, self.supported)
+        except IOError as inst:
+            if inst.errno != errno.ENOENT:
+                raise
+            self.requirements = set()
 
553 cachepath = self.vfs.join('cache')
555 cachepath = self.vfs.join('cache')
554 self.sharedpath = self.path
556 self.sharedpath = self.path
555 try:
557 try:
556 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
558 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
557 if 'relshared' in self.requirements:
559 if 'relshared' in self.requirements:
558 sharedpath = self.vfs.join(sharedpath)
560 sharedpath = self.vfs.join(sharedpath)
559 vfs = vfsmod.vfs(sharedpath, realpath=True)
561 vfs = vfsmod.vfs(sharedpath, realpath=True)
560 cachepath = vfs.join('cache')
562 cachepath = vfs.join('cache')
561 s = vfs.base
563 s = vfs.base
562 if not vfs.exists():
564 if not vfs.exists():
563 raise error.RepoError(
565 raise error.RepoError(
564 _('.hg/sharedpath points to nonexistent directory %s') % s)
566 _('.hg/sharedpath points to nonexistent directory %s') % s)
565 self.sharedpath = s
567 self.sharedpath = s
566 except IOError as inst:
568 except IOError as inst:
567 if inst.errno != errno.ENOENT:
569 if inst.errno != errno.ENOENT:
568 raise
570 raise
569
571
570 if 'exp-sparse' in self.requirements and not sparse.enabled:
572 if 'exp-sparse' in self.requirements and not sparse.enabled:
571 raise error.RepoError(_('repository is using sparse feature but '
573 raise error.RepoError(_('repository is using sparse feature but '
572 'sparse is not enabled; enable the '
574 'sparse is not enabled; enable the '
573 '"sparse" extensions to access'))
575 '"sparse" extensions to access'))
574
576
575 self.store = store.store(
577 self.store = store.store(
576 self.requirements, self.sharedpath,
578 self.requirements, self.sharedpath,
577 lambda base: vfsmod.vfs(base, cacheaudited=True))
579 lambda base: vfsmod.vfs(base, cacheaudited=True))
578 self.spath = self.store.path
580 self.spath = self.store.path
579 self.svfs = self.store.vfs
581 self.svfs = self.store.vfs
580 self.sjoin = self.store.join
582 self.sjoin = self.store.join
581 self.vfs.createmode = self.store.createmode
583 self.vfs.createmode = self.store.createmode
582 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
584 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
583 self.cachevfs.createmode = self.store.createmode
585 self.cachevfs.createmode = self.store.createmode
584 if (self.ui.configbool('devel', 'all-warnings') or
586 if (self.ui.configbool('devel', 'all-warnings') or
585 self.ui.configbool('devel', 'check-locks')):
587 self.ui.configbool('devel', 'check-locks')):
586 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
588 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
587 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
589 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
588 else: # standard vfs
590 else: # standard vfs
589 self.svfs.audit = self._getsvfsward(self.svfs.audit)
591 self.svfs.audit = self._getsvfsward(self.svfs.audit)
590 self._applyopenerreqs()
592 self._applyopenerreqs()
591
593
592 self._dirstatevalidatewarned = False
594 self._dirstatevalidatewarned = False
593
595
594 self._branchcaches = {}
596 self._branchcaches = {}
595 self._revbranchcache = None
597 self._revbranchcache = None
596 self._filterpats = {}
598 self._filterpats = {}
597 self._datafilters = {}
599 self._datafilters = {}
598 self._transref = self._lockref = self._wlockref = None
600 self._transref = self._lockref = self._wlockref = None
599
601
600 # A cache for various files under .hg/ that tracks file changes,
602 # A cache for various files under .hg/ that tracks file changes,
601 # (used by the filecache decorator)
603 # (used by the filecache decorator)
602 #
604 #
603 # Maps a property name to its util.filecacheentry
605 # Maps a property name to its util.filecacheentry
604 self._filecache = {}
606 self._filecache = {}
605
607
606 # hold sets of revision to be filtered
608 # hold sets of revision to be filtered
607 # should be cleared when something might have changed the filter value:
609 # should be cleared when something might have changed the filter value:
608 # - new changesets,
610 # - new changesets,
609 # - phase change,
611 # - phase change,
610 # - new obsolescence marker,
612 # - new obsolescence marker,
611 # - working directory parent change,
613 # - working directory parent change,
612 # - bookmark changes
614 # - bookmark changes
613 self.filteredrevcache = {}
615 self.filteredrevcache = {}
614
616
615 # post-dirstate-status hooks
617 # post-dirstate-status hooks
616 self._postdsstatus = []
618 self._postdsstatus = []
617
619
618 # generic mapping between names and nodes
620 # generic mapping between names and nodes
619 self.names = namespaces.namespaces()
621 self.names = namespaces.namespaces()
620
622
621 # Key to signature value.
623 # Key to signature value.
622 self._sparsesignaturecache = {}
624 self._sparsesignaturecache = {}
623 # Signature to cached matcher instance.
625 # Signature to cached matcher instance.
624 self._sparsematchercache = {}
626 self._sparsematchercache = {}
625
627
626 def _getvfsward(self, origfunc):
628 def _getvfsward(self, origfunc):
627 """build a ward for self.vfs"""
629 """build a ward for self.vfs"""
628 rref = weakref.ref(self)
630 rref = weakref.ref(self)
629 def checkvfs(path, mode=None):
631 def checkvfs(path, mode=None):
630 ret = origfunc(path, mode=mode)
632 ret = origfunc(path, mode=mode)
631 repo = rref()
633 repo = rref()
632 if (repo is None
634 if (repo is None
633 or not util.safehasattr(repo, '_wlockref')
635 or not util.safehasattr(repo, '_wlockref')
634 or not util.safehasattr(repo, '_lockref')):
636 or not util.safehasattr(repo, '_lockref')):
635 return
637 return
636 if mode in (None, 'r', 'rb'):
638 if mode in (None, 'r', 'rb'):
637 return
639 return
638 if path.startswith(repo.path):
640 if path.startswith(repo.path):
639 # truncate name relative to the repository (.hg)
641 # truncate name relative to the repository (.hg)
640 path = path[len(repo.path) + 1:]
642 path = path[len(repo.path) + 1:]
641 if path.startswith('cache/'):
643 if path.startswith('cache/'):
642 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
644 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
643 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
645 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
644 if path.startswith('journal.'):
646 if path.startswith('journal.'):
645 # journal is covered by 'lock'
647 # journal is covered by 'lock'
646 if repo._currentlock(repo._lockref) is None:
648 if repo._currentlock(repo._lockref) is None:
647 repo.ui.develwarn('write with no lock: "%s"' % path,
649 repo.ui.develwarn('write with no lock: "%s"' % path,
648 stacklevel=2, config='check-locks')
650 stacklevel=2, config='check-locks')
649 elif repo._currentlock(repo._wlockref) is None:
651 elif repo._currentlock(repo._wlockref) is None:
650 # rest of vfs files are covered by 'wlock'
652 # rest of vfs files are covered by 'wlock'
651 #
653 #
652 # exclude special files
654 # exclude special files
653 for prefix in self._wlockfreeprefix:
655 for prefix in self._wlockfreeprefix:
654 if path.startswith(prefix):
656 if path.startswith(prefix):
655 return
657 return
656 repo.ui.develwarn('write with no wlock: "%s"' % path,
658 repo.ui.develwarn('write with no wlock: "%s"' % path,
657 stacklevel=2, config='check-locks')
659 stacklevel=2, config='check-locks')
658 return ret
660 return ret
659 return checkvfs
661 return checkvfs
660
662
661 def _getsvfsward(self, origfunc):
663 def _getsvfsward(self, origfunc):
662 """build a ward for self.svfs"""
664 """build a ward for self.svfs"""
663 rref = weakref.ref(self)
665 rref = weakref.ref(self)
664 def checksvfs(path, mode=None):
666 def checksvfs(path, mode=None):
665 ret = origfunc(path, mode=mode)
667 ret = origfunc(path, mode=mode)
666 repo = rref()
668 repo = rref()
667 if repo is None or not util.safehasattr(repo, '_lockref'):
669 if repo is None or not util.safehasattr(repo, '_lockref'):
668 return
670 return
669 if mode in (None, 'r', 'rb'):
671 if mode in (None, 'r', 'rb'):
670 return
672 return
671 if path.startswith(repo.sharedpath):
673 if path.startswith(repo.sharedpath):
672 # truncate name relative to the repository (.hg)
674 # truncate name relative to the repository (.hg)
673 path = path[len(repo.sharedpath) + 1:]
675 path = path[len(repo.sharedpath) + 1:]
674 if repo._currentlock(repo._lockref) is None:
676 if repo._currentlock(repo._lockref) is None:
675 repo.ui.develwarn('write with no lock: "%s"' % path,
677 repo.ui.develwarn('write with no lock: "%s"' % path,
676 stacklevel=3)
678 stacklevel=3)
677 return ret
679 return ret
678 return checksvfs
680 return checksvfs
679
681
680 def close(self):
682 def close(self):
681 self._writecaches()
683 self._writecaches()
682
684
683 def _writecaches(self):
685 def _writecaches(self):
684 if self._revbranchcache:
686 if self._revbranchcache:
685 self._revbranchcache.write()
687 self._revbranchcache.write()
686
688
687 def _restrictcapabilities(self, caps):
689 def _restrictcapabilities(self, caps):
688 if self.ui.configbool('experimental', 'bundle2-advertise'):
690 if self.ui.configbool('experimental', 'bundle2-advertise'):
689 caps = set(caps)
691 caps = set(caps)
690 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
692 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
691 role='client'))
693 role='client'))
692 caps.add('bundle2=' + urlreq.quote(capsblob))
694 caps.add('bundle2=' + urlreq.quote(capsblob))
693 return caps
695 return caps
694
696
695 def _applyopenerreqs(self):
697 def _applyopenerreqs(self):
696 self.svfs.options = dict((r, 1) for r in self.requirements
698 self.svfs.options = dict((r, 1) for r in self.requirements
697 if r in self.openerreqs)
699 if r in self.openerreqs)
698 # experimental config: format.chunkcachesize
700 # experimental config: format.chunkcachesize
699 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
701 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
700 if chunkcachesize is not None:
702 if chunkcachesize is not None:
701 self.svfs.options['chunkcachesize'] = chunkcachesize
703 self.svfs.options['chunkcachesize'] = chunkcachesize
702 # experimental config: format.manifestcachesize
704 # experimental config: format.manifestcachesize
703 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
705 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
704 if manifestcachesize is not None:
706 if manifestcachesize is not None:
705 self.svfs.options['manifestcachesize'] = manifestcachesize
707 self.svfs.options['manifestcachesize'] = manifestcachesize
706 deltabothparents = self.ui.configbool('storage',
708 deltabothparents = self.ui.configbool('storage',
707 'revlog.optimize-delta-parent-choice')
709 'revlog.optimize-delta-parent-choice')
708 self.svfs.options['deltabothparents'] = deltabothparents
710 self.svfs.options['deltabothparents'] = deltabothparents
709 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
711 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
710 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
712 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
711 if 0 <= chainspan:
713 if 0 <= chainspan:
712 self.svfs.options['maxdeltachainspan'] = chainspan
714 self.svfs.options['maxdeltachainspan'] = chainspan
713 mmapindexthreshold = self.ui.configbytes('experimental',
715 mmapindexthreshold = self.ui.configbytes('experimental',
714 'mmapindexthreshold')
716 'mmapindexthreshold')
715 if mmapindexthreshold is not None:
717 if mmapindexthreshold is not None:
716 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
718 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
717 withsparseread = self.ui.configbool('experimental', 'sparse-read')
719 withsparseread = self.ui.configbool('experimental', 'sparse-read')
718 srdensitythres = float(self.ui.config('experimental',
720 srdensitythres = float(self.ui.config('experimental',
719 'sparse-read.density-threshold'))
721 'sparse-read.density-threshold'))
720 srmingapsize = self.ui.configbytes('experimental',
722 srmingapsize = self.ui.configbytes('experimental',
721 'sparse-read.min-gap-size')
723 'sparse-read.min-gap-size')
722 self.svfs.options['with-sparse-read'] = withsparseread
724 self.svfs.options['with-sparse-read'] = withsparseread
723 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
725 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
724 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
726 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
725 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
727 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
726 self.svfs.options['sparse-revlog'] = sparserevlog
728 self.svfs.options['sparse-revlog'] = sparserevlog
727 if sparserevlog:
729 if sparserevlog:
728 self.svfs.options['generaldelta'] = True
730 self.svfs.options['generaldelta'] = True
729 maxchainlen = None
731 maxchainlen = None
730 if sparserevlog:
732 if sparserevlog:
731 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
733 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
732 # experimental config: format.maxchainlen
734 # experimental config: format.maxchainlen
733 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
735 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
734 if maxchainlen is not None:
736 if maxchainlen is not None:
735 self.svfs.options['maxchainlen'] = maxchainlen
737 self.svfs.options['maxchainlen'] = maxchainlen
736
738
737 for r in self.requirements:
739 for r in self.requirements:
738 if r.startswith('exp-compression-'):
740 if r.startswith('exp-compression-'):
739 self.svfs.options['compengine'] = r[len('exp-compression-'):]
741 self.svfs.options['compengine'] = r[len('exp-compression-'):]
740
742
741 # TODO move "revlogv2" to openerreqs once finalized.
743 # TODO move "revlogv2" to openerreqs once finalized.
742 if REVLOGV2_REQUIREMENT in self.requirements:
744 if REVLOGV2_REQUIREMENT in self.requirements:
743 self.svfs.options['revlogv2'] = True
745 self.svfs.options['revlogv2'] = True
744
746
745 def _writerequirements(self):
747 def _writerequirements(self):
746 scmutil.writerequires(self.vfs, self.requirements)
748 scmutil.writerequires(self.vfs, self.requirements)
747
749
748 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
750 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
749 # self -> auditor -> self._checknested -> self
751 # self -> auditor -> self._checknested -> self
750
752
751 @property
753 @property
752 def auditor(self):
754 def auditor(self):
753 # This is only used by context.workingctx.match in order to
755 # This is only used by context.workingctx.match in order to
754 # detect files in subrepos.
756 # detect files in subrepos.
755 return pathutil.pathauditor(self.root, callback=self._checknested)
757 return pathutil.pathauditor(self.root, callback=self._checknested)
756
758
757 @property
759 @property
758 def nofsauditor(self):
760 def nofsauditor(self):
759 # This is only used by context.basectx.match in order to detect
761 # This is only used by context.basectx.match in order to detect
760 # files in subrepos.
762 # files in subrepos.
761 return pathutil.pathauditor(self.root, callback=self._checknested,
763 return pathutil.pathauditor(self.root, callback=self._checknested,
762 realfs=False, cached=True)
764 realfs=False, cached=True)
763
765
764 def _checknested(self, path):
766 def _checknested(self, path):
765 """Determine if path is a legal nested repository."""
767 """Determine if path is a legal nested repository."""
766 if not path.startswith(self.root):
768 if not path.startswith(self.root):
767 return False
769 return False
768 subpath = path[len(self.root) + 1:]
770 subpath = path[len(self.root) + 1:]
769 normsubpath = util.pconvert(subpath)
771 normsubpath = util.pconvert(subpath)
770
772
771 # XXX: Checking against the current working copy is wrong in
773 # XXX: Checking against the current working copy is wrong in
772 # the sense that it can reject things like
774 # the sense that it can reject things like
773 #
775 #
774 # $ hg cat -r 10 sub/x.txt
776 # $ hg cat -r 10 sub/x.txt
775 #
777 #
776 # if sub/ is no longer a subrepository in the working copy
778 # if sub/ is no longer a subrepository in the working copy
777 # parent revision.
779 # parent revision.
778 #
780 #
779 # However, it can of course also allow things that would have
781 # However, it can of course also allow things that would have
780 # been rejected before, such as the above cat command if sub/
782 # been rejected before, such as the above cat command if sub/
781 # is a subrepository now, but was a normal directory before.
783 # is a subrepository now, but was a normal directory before.
782 # The old path auditor would have rejected by mistake since it
784 # The old path auditor would have rejected by mistake since it
783 # panics when it sees sub/.hg/.
785 # panics when it sees sub/.hg/.
784 #
786 #
785 # All in all, checking against the working copy seems sensible
787 # All in all, checking against the working copy seems sensible
786 # since we want to prevent access to nested repositories on
788 # since we want to prevent access to nested repositories on
787 # the filesystem *now*.
789 # the filesystem *now*.
788 ctx = self[None]
790 ctx = self[None]
789 parts = util.splitpath(subpath)
791 parts = util.splitpath(subpath)
790 while parts:
792 while parts:
791 prefix = '/'.join(parts)
793 prefix = '/'.join(parts)
792 if prefix in ctx.substate:
794 if prefix in ctx.substate:
793 if prefix == normsubpath:
795 if prefix == normsubpath:
794 return True
796 return True
795 else:
797 else:
796 sub = ctx.sub(prefix)
798 sub = ctx.sub(prefix)
797 return sub.checknested(subpath[len(prefix) + 1:])
799 return sub.checknested(subpath[len(prefix) + 1:])
798 else:
800 else:
799 parts.pop()
801 parts.pop()
800 return False
802 return False
801
803
802 def peer(self):
804 def peer(self):
803 return localpeer(self) # not cached to avoid reference cycle
805 return localpeer(self) # not cached to avoid reference cycle
804
806
805 def unfiltered(self):
807 def unfiltered(self):
806 """Return unfiltered version of the repository
808 """Return unfiltered version of the repository
807
809
808 Intended to be overwritten by filtered repo."""
810 Intended to be overwritten by filtered repo."""
809 return self
811 return self
810
812
811 def filtered(self, name, visibilityexceptions=None):
813 def filtered(self, name, visibilityexceptions=None):
812 """Return a filtered version of a repository"""
814 """Return a filtered version of a repository"""
813 cls = repoview.newtype(self.unfiltered().__class__)
815 cls = repoview.newtype(self.unfiltered().__class__)
814 return cls(self, name, visibilityexceptions)
816 return cls(self, name, visibilityexceptions)
815
817
816 @repofilecache('bookmarks', 'bookmarks.current')
818 @repofilecache('bookmarks', 'bookmarks.current')
817 def _bookmarks(self):
819 def _bookmarks(self):
818 return bookmarks.bmstore(self)
820 return bookmarks.bmstore(self)
819
821
820 @property
822 @property
821 def _activebookmark(self):
823 def _activebookmark(self):
822 return self._bookmarks.active
824 return self._bookmarks.active
823
825
824 # _phasesets depend on changelog. what we need is to call
826 # _phasesets depend on changelog. what we need is to call
825 # _phasecache.invalidate() if '00changelog.i' was changed, but it
827 # _phasecache.invalidate() if '00changelog.i' was changed, but it
826 # can't be easily expressed in filecache mechanism.
828 # can't be easily expressed in filecache mechanism.
827 @storecache('phaseroots', '00changelog.i')
829 @storecache('phaseroots', '00changelog.i')
828 def _phasecache(self):
830 def _phasecache(self):
829 return phases.phasecache(self, self._phasedefaults)
831 return phases.phasecache(self, self._phasedefaults)
830
832
831 @storecache('obsstore')
833 @storecache('obsstore')
832 def obsstore(self):
834 def obsstore(self):
833 return obsolete.makestore(self.ui, self)
835 return obsolete.makestore(self.ui, self)
834
836
835 @storecache('00changelog.i')
837 @storecache('00changelog.i')
836 def changelog(self):
838 def changelog(self):
837 return changelog.changelog(self.svfs,
839 return changelog.changelog(self.svfs,
838 trypending=txnutil.mayhavepending(self.root))
840 trypending=txnutil.mayhavepending(self.root))
839
841
840 def _constructmanifest(self):
842 def _constructmanifest(self):
841 # This is a temporary function while we migrate from manifest to
843 # This is a temporary function while we migrate from manifest to
842 # manifestlog. It allows bundlerepo and unionrepo to intercept the
844 # manifestlog. It allows bundlerepo and unionrepo to intercept the
843 # manifest creation.
845 # manifest creation.
844 return manifest.manifestrevlog(self.svfs)
846 return manifest.manifestrevlog(self.svfs)
845
847
846 @storecache('00manifest.i')
848 @storecache('00manifest.i')
847 def manifestlog(self):
849 def manifestlog(self):
848 return manifest.manifestlog(self.svfs, self)
850 return manifest.manifestlog(self.svfs, self)
849
851
850 @repofilecache('dirstate')
852 @repofilecache('dirstate')
851 def dirstate(self):
853 def dirstate(self):
852 return self._makedirstate()
854 return self._makedirstate()
853
855
854 def _makedirstate(self):
856 def _makedirstate(self):
855 """Extension point for wrapping the dirstate per-repo."""
857 """Extension point for wrapping the dirstate per-repo."""
856 sparsematchfn = lambda: sparse.matcher(self)
858 sparsematchfn = lambda: sparse.matcher(self)
857
859
858 return dirstate.dirstate(self.vfs, self.ui, self.root,
860 return dirstate.dirstate(self.vfs, self.ui, self.root,
859 self._dirstatevalidate, sparsematchfn)
861 self._dirstatevalidate, sparsematchfn)
860
862
861 def _dirstatevalidate(self, node):
863 def _dirstatevalidate(self, node):
862 try:
864 try:
863 self.changelog.rev(node)
865 self.changelog.rev(node)
864 return node
866 return node
865 except error.LookupError:
867 except error.LookupError:
866 if not self._dirstatevalidatewarned:
868 if not self._dirstatevalidatewarned:
867 self._dirstatevalidatewarned = True
869 self._dirstatevalidatewarned = True
868 self.ui.warn(_("warning: ignoring unknown"
870 self.ui.warn(_("warning: ignoring unknown"
869 " working parent %s!\n") % short(node))
871 " working parent %s!\n") % short(node))
870 return nullid
872 return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
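
    # Illustrative use of the narrow-matcher API (a sketch, assuming an
    # existing `repo`; the path is made up). In a non-narrow repository
    # narrowmatch() returns an always-matcher, so the check is safe to run
    # unconditionally:
    #
    #   m = repo.narrowmatch()
    #   if m('src/module/file.py'):
    #       ...  # the file is inside the narrowspec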

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
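
    # A sketch of the lookup forms __getitem__ accepts (assumes an existing
    # `repo`):
    #
    #   repo[None]    # workingctx for the working directory
    #   repo[0]       # changectx for revision 0
    #   repo[0:3]     # list of changectx, filtered revisions skipped
    #   repo[node]    # changectx for a binary node or other changeid
    #
    # For user-supplied symbols, scmutil.revsymbol() is the preferred entry
    # point.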

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
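
    # Usage sketch for revs()/set() (assumes an existing `repo`; the
    # bookmark name is made up). %-formatting keeps user-supplied values
    # from being parsed as revset syntax:
    #
    #   for rev in repo.revs('ancestors(%s) and not public()', 'mybook'):
    #       ...                 # rev is an integer revision
    #   for ctx in repo.set('heads(%ld)', [1, 2, 3]):
    #       ...                 # ctx is a changectx instance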

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
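
    # A sketch of anyrevs() with a local alias overriding any user-defined
    # alias of the same name (the alias name and definition are made up):
    #
    #   revs = repo.anyrevs(['mine() and draft()'], user=True,
    #                       localalias={'mine': 'author(alice)'})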

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)
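
    # A sketch tying the tag queries together (assumes an existing `repo`;
    # the tag name is made up):
    #
    #   node = repo.tags().get('v1.0')
    #   if node is not None:
    #       kind = repo.tagtype('v1.0')   # 'global' or 'local'
    #       names = repo.nodetags(node)   # all tags on that node, sorted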

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
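
    # Branch lookup sketch (assumes an existing `repo`; the branch name is
    # made up). With ignoremissing=True a missing branch yields None instead
    # of raising RepoLookupError:
    #
    #   tip = repo.branchtip('stable', ignoremissing=True)
    #   if tip is None:
    #       ...  # no such branch in this repository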

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
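
    # The filter tables loaded here come from the [encode] and [decode]
    # sections of hgrc, mapping file patterns to commands; for example (a
    # sketch following the hgrc documentation):
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip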

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
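
    # Flag semantics for wwrite() ('l' = symlink, 'x' = executable), as a
    # usage sketch (assumes an existing `repo`; file names are made up):
    #
    #   repo.wwrite('bin/run.sh', b'#!/bin/sh\n', 'x')  # executable file
    #   repo.wwrite('latest', b'target', 'l')           # symlink
    #   repo.wwrite('README', b'text\n', '')            # plain file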

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks).
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
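
    # Typical calling convention for transaction() (a sketch; assumes an
    # existing `repo`). A lock must be held, and the transaction object is
    # used as a context manager so an exception aborts it:
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # mutate the store; rolled back on error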

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1690
1692
1691 def invalidatedirstate(self):
1693 def invalidatedirstate(self):
1692 '''Invalidates the dirstate, causing the next call to dirstate
1694 '''Invalidates the dirstate, causing the next call to dirstate
1693 to check if it was modified since the last time it was read,
1695 to check if it was modified since the last time it was read,
1694 rereading it if it has.
1696 rereading it if it has.
1695
1697
        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of the store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
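
    # Illustrative sketch (editorial addition, not part of the original
    # source): a caller can defer work until the repository is fully
    # unlocked by registering a callback; the notify() helper below is a
    # placeholder for any such action:
    #
    #     def notify():
    #         repo.ui.status('locks released\n')
    #     repo._afterlock(notify)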

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock; it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
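
    # Illustrative sketch (editorial addition, not part of the original
    # source) of the documented acquisition order -- wlock before lock -- as
    # a caller would follow it:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             pass  # modify store and working copy here
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()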

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
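
    # Illustrative usage sketch (editorial addition, not part of the original
    # source): committing everything in the working directory with a fixed
    # message, as a command or test harness might do; commit() returns None
    # when there is nothing to commit:
    #
    #     node = repo.commit(text='example commit', user='alice <a@b.c>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')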

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1
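
            # Walk first parents from top towards bottom, recording sample
            # nodes at exponentially growing distances (1, 2, 4, ...) so the
            # returned list stays short even for long ranges. (This
            # descriptive comment is an editorial addition, not part of the
            # original source.)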

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
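
    # Illustrative usage sketch (editorial addition, not part of the original
    # source): pushkey namespaces include 'bookmarks' and 'phases'. Creating
    # a bookmark via pushkey passes an empty old value; 'mybookmark' and
    # newnode are placeholders:
    #
    #     ok = repo.pushkey('bookmarks', 'mybookmark', '', hex(newnode))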

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements
2498
2500
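# Illustrative sketch, not part of this module: per the docstring above,
# an extension can wrap newreporequirements() to add a custom requirement.
# The 'myext' config section and 'exp-myfeature' requirement name are
# hypothetical; wrapping uses the standard extensions.wrapfunction()
# mechanism from an extension's extsetup().
def _examplereqwrapper(orig, ui, createopts=None):
    requirements = orig(ui, createopts=createopts)
    if ui.configbool('myext', 'myfeature'):
        requirements.add('exp-myfeature')
    return requirements
# Registration, from an extension module:
#     extensions.wrapfunction(localrepo, 'newreporequirements',
#                             _examplereqwrapper)
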
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

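# Illustrative sketch, not part of this module: an extension that handles a
# hypothetical 'myopt' creation option claims it by wrapping
# filterknowncreateopts(), so createrepository() below will not abort on it.
def _examplefilterwrapper(orig, ui, createopts):
    unknown = orig(ui, createopts)
    unknown.pop('myopt', None)  # claim the option this extension handles
    return unknown
# Registration, from an extension module:
#     extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                             _examplefilterwrapper)
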
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

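# Illustrative sketch, not part of this module: calling createrepository()
# programmatically. The path and the use of the 'narrowfiles' option are
# hypothetical; a real caller would typically go through hg.repository() or
# the init command instead.
def _examplecreate(ui):
    createrepository(ui, b'/tmp/newrepo',
                     createopts={'narrowfiles': True})
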
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
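
# Illustrative sketch, not part of this module: once poisoned, a repo only
# permits close(); every other attribute access raises ProgrammingError.
def _examplepoison(repo):
    poisonrepository(repo)
    repo.close()  # still allowed; the replacement close() is a no-op
    try:
        repo.changelog  # any other attribute lookup now fails
    except error.ProgrammingError:
        pass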