localrepo: read requirements file in makelocalrepository()...
Gregory Szorc
r39728:6a3162ed default
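
The change below moves the reading of `.hg/requires` out of
localrepository.__init__() and into makelocalrepository(), which now passes
the resulting set into the constructor; the constructor keeps the validation
logic but no longer performs any I/O to discover the requirements. A minimal
standalone sketch of that flow, using hypothetical readrequirements(), Repo,
and makerepo() names rather than Mercurial's actual classes:

    import errno
    import os

    def readrequirements(hgdir):
        # Read .hg/requires: one requirement string per line. A missing
        # file (repositories predating Mercurial 0.9.2) means no
        # requirements.
        try:
            with open(os.path.join(hgdir, 'requires')) as fh:
                return set(fh.read().splitlines())
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return set()

    class Repo(object):
        supported = {'revlogv1', 'store', 'fncache', 'dotencode'}

        def __init__(self, requirements):
            # Validation still happens in the constructor, but the I/O
            # already happened in the factory function.
            missing = sorted(r for r in requirements
                             if r not in self.supported)
            if missing:
                raise Exception('repository requires features unknown to '
                                'this Mercurial: %s' % ' '.join(missing))
            self.requirements = requirements

    def makerepo(path):
        # I/O happens up front in the factory, mirroring the new
        # makelocalrepository().
        return Repo(readrequirements(os.path.join(path, '.hg')))
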
@@ -1,2591 +1,2619 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )
 
 from .revlogutils import (
     constants as revlogconst,
 )
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 class _basefilecache(scmutil.filecache):
     """All filecache usage on a repo is done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(_basefilecache, self).__delete__(repo.unfiltered())
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 def isfilecached(repo, name):
     """check if a repo has already cached the "name" filecache-ed property
 
     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True
 
 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on the unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
 
     def __enter__(self):
         return self
 
     def __exit__(self, exctype, excvalue, exctb):
         self.close()
 
     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'sendcommands()')
 
         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'close()')
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))
 
         f = pycompat.futures.Future()
 
         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)
 
         return f
 
     def sendcommands(self):
         self._sent = True
 
     def close(self):
         self._closed = True
 
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
 
     def url(self):
         return self._repo.url()
 
     def local(self):
         return self._repo
 
     def peer(self):
         return self
 
     def canpush(self):
         return True
 
     def close(self):
         self._repo.close()
 
     # End of _basepeer interface.
 
     # Begin of _basewirecommands interface.
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def capabilities(self):
         return self._caps
 
     def clonebundles(self):
         return self._repo.tryread('clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                    pycompat.bytestr(four),
                                    pycompat.bytestr(five))
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
         raise error.Abort(_('cannot perform stream clone against local '
                             'peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'),
                                       stringutil.forcebytestr(exc))
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def commandexecutor(self):
         return localcommandexecutor(self)
 
     # End of peer interface.
 
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                       missingheads=self._repo.heads())
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                       missingheads=heads)
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     # End of baselegacywirecommands interface.
 
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
 # pieces, so that each piece isn't too big.
 # Without the sparserevlog capability, reading from the repository could use
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle those requirements.
 featuresetupfuncs = set()
 
 def makelocalrepository(baseui, path, intents=None):
     """Create a local repository object.
 
     Given arguments needed to construct a local repository, this function
     derives a type suitable for representing that repository and returns an
     instance of it.
 
     The returned object conforms to the ``repository.completelocalrepository``
     interface.
     """
     ui = baseui.copy()
     # Prevent copying repo configuration.
     ui.copy = baseui.copy
 
     # Working directory VFS rooted at repository root.
     wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
 
     # Main VFS for .hg/ directory.
     hgpath = wdirvfs.join(b'.hg')
     hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
 
     # The .hg/ path should exist and should be a directory. All other
     # cases are errors.
     if not hgvfs.isdir():
         try:
             hgvfs.stat()
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise
 
         raise error.RepoError(_(b'repository %s not found') % path)
 
+    # .hg/requires file contains a newline-delimited list of
+    # features/capabilities the opener (us) must have in order to use
+    # the repository. This file was introduced in Mercurial 0.9.2,
+    # which means very old repositories may not have one. We assume
+    # a missing file translates to no requirements.
+    try:
+        requirements = set(hgvfs.read(b'requires').splitlines())
+    except IOError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        requirements = set()
+
     # The .hg/hgrc file may load extensions or contain config options
     # that influence repository construction. Attempt to load it and
     # process any new extensions that it may have pulled in.
     try:
         ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
     except IOError:
         pass
     else:
         extensions.loadall(ui)
 
     return localrepository(
         baseui=baseui,
         ui=ui,
         origroot=path,
         wdirvfs=wdirvfs,
         hgvfs=hgvfs,
+        requirements=requirements,
         intents=intents)
 
 @interfaceutil.implementer(repository.completelocalrepository)
 class localrepository(object):
 
     # obsolete experimental requirements:
     #  - manifestv2: An experimental new manifest format that allowed
     #    for stem compression of long paths. Experiment ended up not
     #    being successful (repository sizes went up due to worse delta
     #    chains), and the code was deleted in 4.6.
     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         REVLOGV2_REQUIREMENT,
         SPARSEREVLOG_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
         'internal-phase'
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
     }
 
     # list of prefixes for files which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We might consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assumes
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicated business someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't be dirstate covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remaining bit and drop this line
         'bisect.state',
     }
 
-    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, intents=None):
+    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
+                 intents=None):
         """Create a new local repository instance.
 
         Most callers should use ``hg.repository()``, ``localrepo.instance()``,
         or ``localrepo.makelocalrepository()`` for obtaining a new repository
         object.
 
         Arguments:
 
         baseui
            ``ui.ui`` instance that ``ui`` argument was based off of.
 
         ui
            ``ui.ui`` instance for use by the repository.
 
         origroot
            ``bytes`` path to working directory root of this repository.
 
         wdirvfs
            ``vfs.vfs`` rooted at the working directory.
 
         hgvfs
            ``vfs.vfs`` rooted at .hg/
 
+        requirements
+           ``set`` of bytestrings representing repository opening requirements.
+
         intents
            ``set`` of system strings indicating what this repo will be used
            for.
         """
         self.baseui = baseui
         self.ui = ui
         self.origroot = origroot
         # vfs rooted at working directory.
         self.wvfs = wdirvfs
         self.root = wdirvfs.base
         # vfs rooted at .hg/. Used to access most non-store paths.
         self.vfs = hgvfs
         self.path = hgvfs.base
 
         self.filtername = None
         # svfs: usually rooted at .hg/store, used to access repository history
         # If this is a shared repository, this vfs may point to another
         # repository's .hg/store directory.
         self.svfs = None
 
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup
         self._phasedefaults = []
 
         if featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
         color.setup(self.ui)
 
         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)
 
-        try:
-            self.requirements = scmutil.readrequires(self.vfs, self.supported)
-        except IOError as inst:
-            if inst.errno != errno.ENOENT:
-                raise
-            self.requirements = set()
+        # Validate that all seen repository requirements are supported.
+        missingrequirements = []
+        for r in requirements:
+            if r not in self.supported:
+                if not r or not r[0:1].isalnum():
+                    raise error.RequirementError(
+                        _(".hg/requires file is corrupt"))
+                missingrequirements.append(r)
+        missingrequirements.sort()
+        if missingrequirements:
+            raise error.RequirementError(
+                _("repository requires features unknown to this Mercurial: %s")
+                % " ".join(missingrequirements),
+                hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
+                       " for more information"))
+
+        self.requirements = requirements
 
         cachepath = self.vfs.join('cache')
         self.sharedpath = self.path
         try:
             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
             if 'relshared' in self.requirements:
                 sharedpath = self.vfs.join(sharedpath)
             vfs = vfsmod.vfs(sharedpath, realpath=True)
             cachepath = vfs.join('cache')
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         if 'exp-sparse' in self.requirements and not sparse.enabled:
             raise error.RepoError(_('repository is using sparse feature but '
                                     'sparse is not enabled; enable the '
                                     '"sparse" extensions to access'))
 
         self.store = store.store(
             self.requirements, self.sharedpath,
             lambda base: vfsmod.vfs(base, cacheaudited=True))
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
         self.cachevfs.createmode = self.store.createmode
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
             else: # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
         self._applyopenerreqs()
 
         self._dirstatevalidatewarned = False
 
         self._branchcaches = {}
         self._revbranchcache = None
         self._filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}
 
         # post-dirstate-status hooks
         self._postdsstatus = []
 
         # generic mapping between names and nodes
         self.names = namespaces.namespaces()
 
         # Key to signature value.
         self._sparsesignaturecache = {}
         # Signature to cached matcher instance.
         self._sparsematchercache = {}
 
     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
         def checkvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if (repo is None
                 or not util.safehasattr(repo, '_wlockref')
                 or not util.safehasattr(repo, '_lockref')):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.path):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.path) + 1:]
             if path.startswith('cache/'):
                 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
             if path.startswith('journal.'):
                 # journal is covered by 'lock'
                 if repo._currentlock(repo._lockref) is None:
                     repo.ui.develwarn('write with no lock: "%s"' % path,
                                       stacklevel=2, config='check-locks')
             elif repo._currentlock(repo._wlockref) is None:
                 # rest of vfs files are covered by 'wlock'
                 #
                 # exclude special files
                 for prefix in self._wlockfreeprefix:
                     if path.startswith(prefix):
                         return
                 repo.ui.develwarn('write with no wlock: "%s"' % path,
                                   stacklevel=2, config='check-locks')
             return ret
         return checkvfs
 
     def _getsvfsward(self, origfunc):
         """build a ward for self.svfs"""
         rref = weakref.ref(self)
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if repo is None or not util.safehasattr(repo, '_lockref'):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.sharedpath):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.sharedpath) + 1:]
             if repo._currentlock(repo._lockref) is None:
                 repo.ui.develwarn('write with no lock: "%s"' % path,
                                   stacklevel=3)
             return ret
         return checksvfs
 
     def close(self):
         self._writecaches()
 
     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()
 
     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise'):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                               role='client'))
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps
 
     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                  if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         deltabothparents = self.ui.configbool('storage',
             'revlog.optimize-delta-parent-choice')
         self.svfs.options['deltabothparents'] = deltabothparents
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
         chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
         if 0 <= chainspan:
             self.svfs.options['maxdeltachainspan'] = chainspan
         mmapindexthreshold = self.ui.configbytes('experimental',
                                                  'mmapindexthreshold')
         if mmapindexthreshold is not None:
             self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
         withsparseread = self.ui.configbool('experimental', 'sparse-read')
         srdensitythres = float(self.ui.config('experimental',
                                               'sparse-read.density-threshold'))
         srmingapsize = self.ui.configbytes('experimental',
                                            'sparse-read.min-gap-size')
         self.svfs.options['with-sparse-read'] = withsparseread
         self.svfs.options['sparse-read-density-threshold'] = srdensitythres
         self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
         sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
         self.svfs.options['sparse-revlog'] = sparserevlog
         if sparserevlog:
             self.svfs.options['generaldelta'] = True
         maxchainlen = None
         if sparserevlog:
             maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
 
         for r in self.requirements:
             if r.startswith('exp-compression-'):
                 self.svfs.options['compengine'] = r[len('exp-compression-'):]
 
         # TODO move "revlogv2" to openerreqs once finalized.
         if REVLOGV2_REQUIREMENT in self.requirements:
             self.svfs.options['revlogv2'] = True
 
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
 
     # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
     # self -> auditor -> self._checknested -> self
 
753 @property
781 @property
754 def auditor(self):
782 def auditor(self):
755 # This is only used by context.workingctx.match in order to
783 # This is only used by context.workingctx.match in order to
756 # detect files in subrepos.
784 # detect files in subrepos.
757 return pathutil.pathauditor(self.root, callback=self._checknested)
785 return pathutil.pathauditor(self.root, callback=self._checknested)
758
786
759 @property
787 @property
760 def nofsauditor(self):
788 def nofsauditor(self):
761 # This is only used by context.basectx.match in order to detect
789 # This is only used by context.basectx.match in order to detect
762 # files in subrepos.
790 # files in subrepos.
763 return pathutil.pathauditor(self.root, callback=self._checknested,
791 return pathutil.pathauditor(self.root, callback=self._checknested,
764 realfs=False, cached=True)
792 realfs=False, cached=True)
765
793
766 def _checknested(self, path):
794 def _checknested(self, path):
767 """Determine if path is a legal nested repository."""
795 """Determine if path is a legal nested repository."""
768 if not path.startswith(self.root):
796 if not path.startswith(self.root):
769 return False
797 return False
770 subpath = path[len(self.root) + 1:]
798 subpath = path[len(self.root) + 1:]
771 normsubpath = util.pconvert(subpath)
799 normsubpath = util.pconvert(subpath)
772
800
773 # XXX: Checking against the current working copy is wrong in
801 # XXX: Checking against the current working copy is wrong in
774 # the sense that it can reject things like
802 # the sense that it can reject things like
775 #
803 #
776 # $ hg cat -r 10 sub/x.txt
804 # $ hg cat -r 10 sub/x.txt
777 #
805 #
778 # if sub/ is no longer a subrepository in the working copy
806 # if sub/ is no longer a subrepository in the working copy
779 # parent revision.
807 # parent revision.
780 #
808 #
781 # However, it can of course also allow things that would have
809 # However, it can of course also allow things that would have
782 # been rejected before, such as the above cat command if sub/
810 # been rejected before, such as the above cat command if sub/
783 # is a subrepository now, but was a normal directory before.
811 # is a subrepository now, but was a normal directory before.
784 # The old path auditor would have rejected by mistake since it
812 # The old path auditor would have rejected by mistake since it
785 # panics when it sees sub/.hg/.
813 # panics when it sees sub/.hg/.
786 #
814 #
787 # All in all, checking against the working copy seems sensible
815 # All in all, checking against the working copy seems sensible
788 # since we want to prevent access to nested repositories on
816 # since we want to prevent access to nested repositories on
789 # the filesystem *now*.
817 # the filesystem *now*.
790 ctx = self[None]
818 ctx = self[None]
791 parts = util.splitpath(subpath)
819 parts = util.splitpath(subpath)
792 while parts:
820 while parts:
793 prefix = '/'.join(parts)
821 prefix = '/'.join(parts)
794 if prefix in ctx.substate:
822 if prefix in ctx.substate:
795 if prefix == normsubpath:
823 if prefix == normsubpath:
796 return True
824 return True
797 else:
825 else:
798 sub = ctx.sub(prefix)
826 sub = ctx.sub(prefix)
799 return sub.checknested(subpath[len(prefix) + 1:])
827 return sub.checknested(subpath[len(prefix) + 1:])
800 else:
828 else:
801 parts.pop()
829 parts.pop()
802 return False
830 return False
803
831
804 def peer(self):
832 def peer(self):
805 return localpeer(self) # not cached to avoid reference cycle
833 return localpeer(self) # not cached to avoid reference cycle
806
834
807 def unfiltered(self):
835 def unfiltered(self):
808 """Return unfiltered version of the repository
836 """Return unfiltered version of the repository
809
837
810 Intended to be overwritten by filtered repo."""
838 Intended to be overwritten by filtered repo."""
811 return self
839 return self
812
840
813 def filtered(self, name, visibilityexceptions=None):
841 def filtered(self, name, visibilityexceptions=None):
814 """Return a filtered version of a repository"""
842 """Return a filtered version of a repository"""
815 cls = repoview.newtype(self.unfiltered().__class__)
843 cls = repoview.newtype(self.unfiltered().__class__)
816 return cls(self, name, visibilityexceptions)
844 return cls(self, name, visibilityexceptions)
817
845
818 @repofilecache('bookmarks', 'bookmarks.current')
846 @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

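    # A few illustrative lookups accepted by __getitem__ (sketch only; the
    # exact contexts depend on the repository contents):
    #
    #   repo[0]      -> changectx for revision 0
    #   repo[None]   -> workingctx for the working directory
    #   repo[0:2]    -> [changectx(0), changectx(1)], filtered revs skipped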
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

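    # A minimal usage sketch (hypothetical caller code): the %-formatting
    # escapes the supplied values, so untrusted strings cannot alter the
    # structure of the revset.
    #
    #   for rev in repo.revs('ancestors(%d) and user(%s)', 42, 'alice'):
    #       ...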
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

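    # Hypothetical example: expand user aliases but override one of them
    # locally for the duration of this query.
    #
    #   revs = repo.anyrevs(['releases() and not obsolete()'], user=True,
    #                       localalias={'releases': 'tag("re:^v")'})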
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

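    # Sketch of a custom hook invocation (names are illustrative, not part
    # of this file): an extension defining its own 'myext-sync' hook point
    # could fire it with
    #
    #   repo.hook('myext-sync', throw=False, source='myext')
    #
    # and users could then attach scripts to it via the [hooks] section.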
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

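    # A minimal sketch of how an extension might plug into the filter
    # machinery above (all names here are hypothetical): register a named
    # data filter via adddatafilter() and reference it from the
    # [encode]/[decode] config sections that _loadfilter() reads.
    #
    #   def reposetup(ui, repo):
    #       def upperfilter(s, cmd, ui=None, repo=None, **kwargs):
    #           return s.upper()
    #       repo.adddatafilter('upper:', upperfilter)
    #
    # with a matching hgrc entry:
    #
    #   [encode]
    #   *.txt = upper: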
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

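    # For illustration, the ``flags`` argument uses the manifest flag
    # characters: '' for a regular file, 'x' for an executable file, 'l'
    # for a symlink. A hypothetical caller restoring an executable script:
    #
    #   repo.wwrite('build.sh', b'#!/bin/sh\necho hi\n', 'x')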
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
1360 # experimental config: experimental.hook-track-tags
1388 # experimental config: experimental.hook-track-tags
1361 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1389 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1362 if desc != 'strip' and shouldtracktags:
1390 if desc != 'strip' and shouldtracktags:
1363 oldheads = self.changelog.headrevs()
1391 oldheads = self.changelog.headrevs()
1364 def tracktags(tr2):
1392 def tracktags(tr2):
1365 repo = reporef()
1393 repo = reporef()
1366 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1394 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1367 newheads = repo.changelog.headrevs()
1395 newheads = repo.changelog.headrevs()
1368 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1396 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1369 # notes: we compare lists here.
1397 # notes: we compare lists here.
1370 # As we do it only once buiding set would not be cheaper
1398 # As we do it only once buiding set would not be cheaper
1371 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1399 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1372 if changes:
1400 if changes:
1373 tr2.hookargs['tag_moved'] = '1'
1401 tr2.hookargs['tag_moved'] = '1'
1374 with repo.vfs('changes/tags.changes', 'w',
1402 with repo.vfs('changes/tags.changes', 'w',
1375 atomictemp=True) as changesfile:
1403 atomictemp=True) as changesfile:
1376 # note: we do not register the file to the transaction
1404 # note: we do not register the file to the transaction
1377 # because we needs it to still exist on the transaction
1405 # because we needs it to still exist on the transaction
1378 # is close (for txnclose hooks)
1406 # is close (for txnclose hooks)
1379 tagsmod.writediff(changesfile, changes)
1407 tagsmod.writediff(changesfile, changes)
1380 def validate(tr2):
1408 def validate(tr2):
1381 """will run pre-closing hooks"""
1409 """will run pre-closing hooks"""
1382 # XXX the transaction API is a bit lacking here so we take a hacky
1410 # XXX the transaction API is a bit lacking here so we take a hacky
1383 # path for now
1411 # path for now
1384 #
1412 #
1385 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1413 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1386 # dict is copied before these run. In addition we needs the data
1414 # dict is copied before these run. In addition we needs the data
1387 # available to in memory hooks too.
1415 # available to in memory hooks too.
1388 #
1416 #
1389 # Moreover, we also need to make sure this runs before txnclose
1417 # Moreover, we also need to make sure this runs before txnclose
1390 # hooks and there is no "pending" mechanism that would execute
1418 # hooks and there is no "pending" mechanism that would execute
1391 # logic only if hooks are about to run.
1419 # logic only if hooks are about to run.
1392 #
1420 #
1393 # Fixing this limitation of the transaction is also needed to track
1421 # Fixing this limitation of the transaction is also needed to track
1394 # other families of changes (bookmarks, phases, obsolescence).
1422 # other families of changes (bookmarks, phases, obsolescence).
1395 #
1423 #
1396 # This will have to be fixed before we remove the experimental
1424 # This will have to be fixed before we remove the experimental
1397 # gating.
1425 # gating.
1398 tracktags(tr2)
1426 tracktags(tr2)
1399 repo = reporef()
1427 repo = reporef()
1400 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1428 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1401 scmutil.enforcesinglehead(repo, tr2, desc)
1429 scmutil.enforcesinglehead(repo, tr2, desc)
1402 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1430 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1403 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1431 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1404 args = tr.hookargs.copy()
1432 args = tr.hookargs.copy()
1405 args.update(bookmarks.preparehookargs(name, old, new))
1433 args.update(bookmarks.preparehookargs(name, old, new))
1406 repo.hook('pretxnclose-bookmark', throw=True,
1434 repo.hook('pretxnclose-bookmark', throw=True,
1407 txnname=desc,
1435 txnname=desc,
1408 **pycompat.strkwargs(args))
1436 **pycompat.strkwargs(args))
1409 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1437 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1410 cl = repo.unfiltered().changelog
1438 cl = repo.unfiltered().changelog
1411 for rev, (old, new) in tr.changes['phases'].items():
1439 for rev, (old, new) in tr.changes['phases'].items():
1412 args = tr.hookargs.copy()
1440 args = tr.hookargs.copy()
1413 node = hex(cl.node(rev))
1441 node = hex(cl.node(rev))
1414 args.update(phases.preparehookargs(node, old, new))
1442 args.update(phases.preparehookargs(node, old, new))
1415 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1443 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1416 **pycompat.strkwargs(args))
1444 **pycompat.strkwargs(args))
1417
1445
1418 repo.hook('pretxnclose', throw=True,
1446 repo.hook('pretxnclose', throw=True,
1419 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1447 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1420 def releasefn(tr, success):
1448 def releasefn(tr, success):
1421 repo = reporef()
1449 repo = reporef()
1422 if success:
1450 if success:
1423 # this should be explicitly invoked here, because
1451 # this should be explicitly invoked here, because
1424 # in-memory changes aren't written out at closing
1452 # in-memory changes aren't written out at closing
1425 # transaction, if tr.addfilegenerator (via
1453 # transaction, if tr.addfilegenerator (via
1426 # dirstate.write or so) isn't invoked while
1454 # dirstate.write or so) isn't invoked while
1427 # transaction running
1455 # transaction running
1428 repo.dirstate.write(None)
1456 repo.dirstate.write(None)
1429 else:
1457 else:
1430 # discard all changes (including ones already written
1458 # discard all changes (including ones already written
1431 # out) in this transaction
1459 # out) in this transaction
1432 narrowspec.restorebackup(self, 'journal.narrowspec')
1460 narrowspec.restorebackup(self, 'journal.narrowspec')
1433 repo.dirstate.restorebackup(None, 'journal.dirstate')
1461 repo.dirstate.restorebackup(None, 'journal.dirstate')
1434
1462
1435 repo.invalidate(clearfilecache=True)
1463 repo.invalidate(clearfilecache=True)
1436
1464
1437 tr = transaction.transaction(rp, self.svfs, vfsmap,
1465 tr = transaction.transaction(rp, self.svfs, vfsmap,
1438 "journal",
1466 "journal",
1439 "undo",
1467 "undo",
1440 aftertrans(renames),
1468 aftertrans(renames),
1441 self.store.createmode,
1469 self.store.createmode,
1442 validator=validate,
1470 validator=validate,
1443 releasefn=releasefn,
1471 releasefn=releasefn,
1444 checkambigfiles=_cachedfiles,
1472 checkambigfiles=_cachedfiles,
1445 name=desc)
1473 name=desc)
1446 tr.changes['origrepolen'] = len(self)
1474 tr.changes['origrepolen'] = len(self)
1447 tr.changes['obsmarkers'] = set()
1475 tr.changes['obsmarkers'] = set()
1448 tr.changes['phases'] = {}
1476 tr.changes['phases'] = {}
1449 tr.changes['bookmarks'] = {}
1477 tr.changes['bookmarks'] = {}
1450
1478
1451 tr.hookargs['txnid'] = txnid
1479 tr.hookargs['txnid'] = txnid
1452 # note: writing the fncache only during finalize mean that the file is
1480 # note: writing the fncache only during finalize mean that the file is
1453 # outdated when running hooks. As fncache is used for streaming clone,
1481 # outdated when running hooks. As fncache is used for streaming clone,
1454 # this is not expected to break anything that happen during the hooks.
1482 # this is not expected to break anything that happen during the hooks.
1455 tr.addfinalize('flush-fncache', self.store.write)
1483 tr.addfinalize('flush-fncache', self.store.write)
1456 def txnclosehook(tr2):
1484 def txnclosehook(tr2):
1457 """To be run if transaction is successful, will schedule a hook run
1485 """To be run if transaction is successful, will schedule a hook run
1458 """
1486 """
1459 # Don't reference tr2 in hook() so we don't hold a reference.
1487 # Don't reference tr2 in hook() so we don't hold a reference.
1460 # This reduces memory consumption when there are multiple
1488 # This reduces memory consumption when there are multiple
1461 # transactions per lock. This can likely go away if issue5045
1489 # transactions per lock. This can likely go away if issue5045
1462 # fixes the function accumulation.
1490 # fixes the function accumulation.
1463 hookargs = tr2.hookargs
1491 hookargs = tr2.hookargs
1464
1492
1465 def hookfunc():
1493 def hookfunc():
1466 repo = reporef()
1494 repo = reporef()
1467 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1495 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1468 bmchanges = sorted(tr.changes['bookmarks'].items())
1496 bmchanges = sorted(tr.changes['bookmarks'].items())
1469 for name, (old, new) in bmchanges:
1497 for name, (old, new) in bmchanges:
1470 args = tr.hookargs.copy()
1498 args = tr.hookargs.copy()
1471 args.update(bookmarks.preparehookargs(name, old, new))
1499 args.update(bookmarks.preparehookargs(name, old, new))
1472 repo.hook('txnclose-bookmark', throw=False,
1500 repo.hook('txnclose-bookmark', throw=False,
1473 txnname=desc, **pycompat.strkwargs(args))
1501 txnname=desc, **pycompat.strkwargs(args))
1474
1502
1475 if hook.hashook(repo.ui, 'txnclose-phase'):
1503 if hook.hashook(repo.ui, 'txnclose-phase'):
1476 cl = repo.unfiltered().changelog
1504 cl = repo.unfiltered().changelog
1477 phasemv = sorted(tr.changes['phases'].items())
1505 phasemv = sorted(tr.changes['phases'].items())
1478 for rev, (old, new) in phasemv:
1506 for rev, (old, new) in phasemv:
1479 args = tr.hookargs.copy()
1507 args = tr.hookargs.copy()
1480 node = hex(cl.node(rev))
1508 node = hex(cl.node(rev))
1481 args.update(phases.preparehookargs(node, old, new))
1509 args.update(phases.preparehookargs(node, old, new))
1482 repo.hook('txnclose-phase', throw=False, txnname=desc,
1510 repo.hook('txnclose-phase', throw=False, txnname=desc,
1483 **pycompat.strkwargs(args))
1511 **pycompat.strkwargs(args))
1484
1512
1485 repo.hook('txnclose', throw=False, txnname=desc,
1513 repo.hook('txnclose', throw=False, txnname=desc,
1486 **pycompat.strkwargs(hookargs))
1514 **pycompat.strkwargs(hookargs))
1487 reporef()._afterlock(hookfunc)
1515 reporef()._afterlock(hookfunc)
1488 tr.addfinalize('txnclose-hook', txnclosehook)
1516 tr.addfinalize('txnclose-hook', txnclosehook)
1489 # Include a leading "-" to make it happen before the transaction summary
1517 # Include a leading "-" to make it happen before the transaction summary
1490 # reports registered via scmutil.registersummarycallback() whose names
1518 # reports registered via scmutil.registersummarycallback() whose names
1491 # are 00-txnreport etc. That way, the caches will be warm when the
1519 # are 00-txnreport etc. That way, the caches will be warm when the
1492 # callbacks run.
1520 # callbacks run.
1493 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1521 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1494 def txnaborthook(tr2):
1522 def txnaborthook(tr2):
1495 """To be run if transaction is aborted
1523 """To be run if transaction is aborted
1496 """
1524 """
1497 reporef().hook('txnabort', throw=False, txnname=desc,
1525 reporef().hook('txnabort', throw=False, txnname=desc,
1498 **pycompat.strkwargs(tr2.hookargs))
1526 **pycompat.strkwargs(tr2.hookargs))
1499 tr.addabort('txnabort-hook', txnaborthook)
1527 tr.addabort('txnabort-hook', txnaborthook)
1500 # avoid eager cache invalidation. in-memory data should be identical
1528 # avoid eager cache invalidation. in-memory data should be identical
1501 # to stored data if transaction has no error.
1529 # to stored data if transaction has no error.
1502 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1530 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1503 self._transref = weakref.ref(tr)
1531 self._transref = weakref.ref(tr)
1504 scmutil.registersummarycallback(self, tr, desc)
1532 scmutil.registersummarycallback(self, tr, desc)
1505 return tr
1533 return tr
1506
1534
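    # A sketch of typical caller usage (hypothetical snippet; real callers
    # live in command and exchange code). Transactions require the store
    # lock, and the returned transaction can be used as a context manager:
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # mutations are rolled back if an exception escapes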
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

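    # For illustration: in a repository with 42 changesets, opening a
    # transaction for a commit writes "42\ncommit\n" to journal.desc. On
    # success the journal.* files are renamed to their undo.* counterparts
    # (see undofiles()), which is what _rollback() later reads back as
    # (oldlen, desc).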
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

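    # Sketch of an extension augmenting the cache updater (hypothetical
    # names, using the standard extensions.wrapfunction helper):
    #
    #   def _wrappedupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def myupdater(tr):
    #           updater(tr)
    #           ...  # warm extension-specific caches here
    #       return myupdater
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _wrappedupdater)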
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

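    # Illustrative sketch (editor's addition, not part of the original
    # module): warming every cache updatecaches() knows about, under the
    # repository locks, the way a cache-priming command would. `repo` is
    # assumed to be an open localrepository handle.
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
    #
    # Passing full=True also populates the lazily-built revbranchcache and
    # the manifestfulltextcache entries for the working copy parents.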
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

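    # Illustrative sketch (editor's addition): registering work to run once
    # every lock has been released, e.g. a notification that must observe
    # the repository in its final on-disk state. `repo` is assumed to be a
    # localrepository; the callback takes no arguments.
    #
    #     def notify():
    #         repo.ui.debug('all repository locks released\n')
    #     repo._afterlock(notify)
    #
    # If no lock is currently held, _afterlock() invokes the callback
    # immediately instead of queueing it on a lock's postrelease list.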
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

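    # Illustrative sketch (editor's addition): the acquisition order the
    # docstrings above prescribe -- 'wlock' strictly before 'lock' -- for
    # an operation touching both the working copy and the store. `repo` is
    # an assumed localrepository; `release` is the module-level alias for
    # lockmod.release.
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         # ... mutate working copy and store here ...
    #     finally:
    #         release(lock, wlock)
    #
    # Acquiring in the opposite order trips the devel 'check-locks' warning.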
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

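    # Illustrative sketch (editor's addition): a minimal programmatic commit,
    # assuming the files were already scheduled for addition, e.g. via
    # repo[None].add([...]). commit() returns the new changelog node, or
    # None when the commit would be empty and ui.allowemptycommit is unset.
    #
    #     node = repo.commit(text='update docs', user='alice <a@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')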
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

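    # Illustrative sketch (editor's addition): how an extension might queue
    # a post-dirstate-status fixup. The callback runs under wlock with the
    # fresh workingctx and the status being returned; the hypothetical
    # `fixup` below only inspects them.
    #
    #     def fixup(wctx, status):
    #         wctx.repo().ui.debug('%d modified files\n'
    #                              % len(status.modified))
    #     repo.addpostdsstatus(fixup)
    #
    # As the docstring warns, the list is cleared after every status run,
    # so the registration must be repeated before each dirstate.status call.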
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

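    # Editor's note on between(): for each (top, bottom) pair it walks first
    # parents from top towards bottom, recording nodes at exponentially
    # growing distances (1, 2, 4, 8, ...), because f doubles each time the
    # step counter i catches up with it. A hypothetical call:
    #
    #     samples = repo.between([(tipnode, basenode)])[0]
    #     # samples[k] is the node 2**k first-parent steps below tipnode
    #
    # This sparse skeleton is what the legacy wire protocol used to narrow
    # down common ancestors without transferring whole changelogs.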
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

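    # Illustrative sketch (editor's addition): moving a bookmark through the
    # generic pushkey plumbing, with old and new values given as hex nodes
    # ('' denoting creation). The bookmark name below is hypothetical.
    #
    #     ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    #     if not ok:
    #         repo.ui.warn('bookmark update was refused by a hook\n')
    #
    # The prepushkey hook can veto the change; the pushkey hook only fires
    # once every lock has been released, via _afterlock above.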
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

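    # Illustrative sketch (editor's addition): savecommitmessage() writes
    # the text to .hg/last-message.txt and returns that path relative to
    # the repository root, which is what the "note: commit message saved
    # in %s" hint in commit() prints when a transaction fails.
    #
    #     msgfn = repo.savecommitmessage('WIP: refactor locking')
    #     repo.ui.status('message stashed in %s\n' % msgfn)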
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

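# Editor's note: undoname() maps a journal file to its post-transaction
# counterpart, e.g. (hypothetical paths):
#
#     undoname('.hg/store/journal')            -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
#
# aftertrans() returns the closure that performs exactly these renames once
# a transaction closes.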
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

2450 def newreporequirements(ui, createopts=None):
2478 def newreporequirements(ui, createopts=None):
2451 """Determine the set of requirements for a new local repository.
2479 """Determine the set of requirements for a new local repository.
2452
2480
2453 Extensions can wrap this function to specify custom requirements for
2481 Extensions can wrap this function to specify custom requirements for
2454 new repositories.
2482 new repositories.
2455 """
2483 """
2456 createopts = createopts or {}
2484 createopts = createopts or {}
2457
2485
2458 requirements = {'revlogv1'}
2486 requirements = {'revlogv1'}
2459 if ui.configbool('format', 'usestore'):
2487 if ui.configbool('format', 'usestore'):
2460 requirements.add('store')
2488 requirements.add('store')
2461 if ui.configbool('format', 'usefncache'):
2489 if ui.configbool('format', 'usefncache'):
2462 requirements.add('fncache')
2490 requirements.add('fncache')
2463 if ui.configbool('format', 'dotencode'):
2491 if ui.configbool('format', 'dotencode'):
2464 requirements.add('dotencode')
2492 requirements.add('dotencode')
2465
2493
2466 compengine = ui.config('experimental', 'format.compression')
2494 compengine = ui.config('experimental', 'format.compression')
2467 if compengine not in util.compengines:
2495 if compengine not in util.compengines:
2468 raise error.Abort(_('compression engine %s defined by '
2496 raise error.Abort(_('compression engine %s defined by '
2469 'experimental.format.compression not available') %
2497 'experimental.format.compression not available') %
2470 compengine,
2498 compengine,
2471 hint=_('run "hg debuginstall" to list available '
2499 hint=_('run "hg debuginstall" to list available '
2472 'compression engines'))
2500 'compression engines'))
2473
2501
2474 # zlib is the historical default and doesn't need an explicit requirement.
2502 # zlib is the historical default and doesn't need an explicit requirement.
2475 if compengine != 'zlib':
2503 if compengine != 'zlib':
2476 requirements.add('exp-compression-%s' % compengine)
2504 requirements.add('exp-compression-%s' % compengine)
2477
2505
2478 if scmutil.gdinitconfig(ui):
2506 if scmutil.gdinitconfig(ui):
2479 requirements.add('generaldelta')
2507 requirements.add('generaldelta')
2480 if ui.configbool('experimental', 'treemanifest'):
2508 if ui.configbool('experimental', 'treemanifest'):
2481 requirements.add('treemanifest')
2509 requirements.add('treemanifest')
2482 # experimental config: format.sparse-revlog
2510 # experimental config: format.sparse-revlog
2483 if ui.configbool('format', 'sparse-revlog'):
2511 if ui.configbool('format', 'sparse-revlog'):
2484 requirements.add(SPARSEREVLOG_REQUIREMENT)
2512 requirements.add(SPARSEREVLOG_REQUIREMENT)
2485
2513
2486 revlogv2 = ui.config('experimental', 'revlogv2')
2514 revlogv2 = ui.config('experimental', 'revlogv2')
2487 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2515 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2488 requirements.remove('revlogv1')
2516 requirements.remove('revlogv1')
2489 # generaldelta is implied by revlogv2.
2517 # generaldelta is implied by revlogv2.
2490 requirements.discard('generaldelta')
2518 requirements.discard('generaldelta')
2491 requirements.add(REVLOGV2_REQUIREMENT)
2519 requirements.add(REVLOGV2_REQUIREMENT)
2492 # experimental config: format.internal-phase
2520 # experimental config: format.internal-phase
2493 if ui.configbool('format', 'internal-phase'):
2521 if ui.configbool('format', 'internal-phase'):
2494 requirements.add('internal-phase')
2522 requirements.add('internal-phase')
2495
2523
2496 if createopts.get('narrowfiles'):
2524 if createopts.get('narrowfiles'):
2497 requirements.add(repository.NARROW_REQUIREMENT)
2525 requirements.add(repository.NARROW_REQUIREMENT)
2498
2526
2499 return requirements
2527 return requirements
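
# A minimal sketch (not part of localrepo.py) of the wrapping the docstring
# above describes. The requirement name 'exp-exampleext' is hypothetical;
# extensions.wrapfunction() and the newreporequirements() signature are the
# real APIs involved.
def _examplewrapnewreporequirements(orig, ui, createopts=None):
    # Compute the stock requirements, then add this extension's own.
    requirements = orig(ui, createopts=createopts)
    requirements.add('exp-exampleext')
    return requirements

# An extension would install the wrapper from its uisetup(), e.g.:
#   extensions.wrapfunction(localrepo, 'newreporequirements',
#                           _examplewrapnewreporequirements)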

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}
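
# A minimal sketch (hypothetical, as above) of an extension claiming a custom
# creation option so createrepository() will accept it:
def _examplewrapfilterknowncreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    # 'exampleopt' is a made-up option this hypothetical extension handles;
    # removing it from the unknown set tells core it is understood.
    unknown.pop('exampleopt', None)
    return unknown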

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')
    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')
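
        # For illustration (an assumption about how pre-requirements clients
        # behave, not something this change verifies): a revlog reader takes
        # the first four bytes of 00changelog.i as a big-endian integer
        # carrying the format version, so the header above decodes to 2:
        #
        #   >>> import struct
        #   >>> struct.unpack('>I', b'\0\0\0\2')[0]
        #   2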

    scmutil.writerequires(hgvfs, requirements)
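
# For orientation (the exact set depends on configuration, so treat this as a
# typical example rather than a guarantee): with stock settings the resulting
# .hg/requires contains one requirement per line, sorted by
# scmutil.writerequires():
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   store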

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
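
# A short sketch of the observable behavior (hypothetical usage):
#
#   poisonrepository(repo)
#   repo.close()      # still allowed; close() is whitelisted above
#   repo.changelog    # raises error.ProgrammingError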