narrow: remove hack to read narrowspec from shared .hg directory...
Martin von Zweigbergk
r39794:543f26ec default
@@ -1,2746 +1,2742 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

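# Editor's note: a minimal usage sketch for isfilecached(), illustrative
# only and not part of the original module. 'changelog' is used as an
# example of a filecache-backed property name.
def _example_isfilecached(repo):
    # peek at the cached property without forcing a read from disk
    obj, cached = isfilecached(repo, 'changelog')
    if cached:
        return obj          # already populated; reuse without I/O
    return None
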
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

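# Editor's note: an illustrative sketch, not part of the original module,
# showing how @unfilteredmethod is applied: the decorated method always
# receives the unfiltered repository, whichever filtered view it was
# called through. The class and method names below are hypothetical.
class _exampleunfiltereduser(object):
    @unfilteredmethod
    def invalidatecaches(self):
        # 'self' here is guaranteed to be the unfiltered repo
        pass
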
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

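# Editor's note: a hedged usage sketch, not part of the original module,
# of the command executor protocol: commands are requested through
# callcommand() inside a context manager and resolve to futures.
def _example_callcommand(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand(b'lookup', {b'key': b'tip'})
        # the local executor resolves the future immediately
        return f.result()
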
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

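# Editor's note: an illustrative sketch, not part of the original module,
# of a feature setup function as an extension would define it; the
# requirement name below is hypothetical. The extension adds it to
# featuresetupfuncs during its setup, and it is only invoked when that
# extension is loaded for the current ui.
def _examplefeaturesetup(ui, supported):
    supported.add(b'exp-myextension-feature')
# featuresetupfuncs.add(_examplefeaturesetup)
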
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    derives a type suitable for representing that repository and returns an
    instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    return localrepository(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        intents=intents)

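# Editor's note: a minimal sketch, not part of the original module, of
# constructing a repository object directly; most callers should go
# through ``hg.repository()`` instead, as the docstring above notes. The
# path is hypothetical.
def _example_makelocalrepository(baseui):
    repo = makelocalrepository(baseui, b'/path/to/repo')
    return repo.requirements
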
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extensions to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

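# Editor's note: an illustrative sketch, not part of the original module,
# of how the requirements set picks the store implementation; a modern
# repository with 'store', 'fncache' and 'dotencode' gets a fncachestore.
def _example_makestore(path):
    requirements = {b'store', b'fncache', b'dotencode'}
    return makestore(requirements, path,
                     lambda base: vfsmod.vfs(base, cacheaudited=True))
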
def resolvestorevfsoptions(ui, requirements):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements))

    return options

def resolverevlogstorevfsoptions(ui, requirements):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    return options

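# Editor's note: a hedged sketch, not part of the original module, showing
# how the two resolvers above compose: for a 'revlogv1' repository the
# generic resolver delegates to the revlog-specific one.
def _example_storevfsoptions(ui):
    requirements = {b'revlogv1', b'generaldelta'}
    options = resolvestorevfsoptions(ui, requirements)
    # options[b'generaldelta'] will be True for this requirements set
    return options
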
@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

887 def close(self):
887 def close(self):
888 self._writecaches()
888 self._writecaches()
889
889
890 def _writecaches(self):
890 def _writecaches(self):
891 if self._revbranchcache:
891 if self._revbranchcache:
892 self._revbranchcache.write()
892 self._revbranchcache.write()
893
893
894 def _restrictcapabilities(self, caps):
894 def _restrictcapabilities(self, caps):
895 if self.ui.configbool('experimental', 'bundle2-advertise'):
895 if self.ui.configbool('experimental', 'bundle2-advertise'):
896 caps = set(caps)
896 caps = set(caps)
897 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
897 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
898 role='client'))
898 role='client'))
899 caps.add('bundle2=' + urlreq.quote(capsblob))
899 caps.add('bundle2=' + urlreq.quote(capsblob))
900 return caps
900 return caps
901
901
902 def _writerequirements(self):
902 def _writerequirements(self):
903 scmutil.writerequires(self.vfs, self.requirements)
903 scmutil.writerequires(self.vfs, self.requirements)
904
904
905 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
905 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
906 # self -> auditor -> self._checknested -> self
906 # self -> auditor -> self._checknested -> self
907
907
908 @property
908 @property
909 def auditor(self):
909 def auditor(self):
910 # This is only used by context.workingctx.match in order to
910 # This is only used by context.workingctx.match in order to
911 # detect files in subrepos.
911 # detect files in subrepos.
912 return pathutil.pathauditor(self.root, callback=self._checknested)
912 return pathutil.pathauditor(self.root, callback=self._checknested)
913
913
914 @property
914 @property
915 def nofsauditor(self):
915 def nofsauditor(self):
916 # This is only used by context.basectx.match in order to detect
916 # This is only used by context.basectx.match in order to detect
917 # files in subrepos.
917 # files in subrepos.
918 return pathutil.pathauditor(self.root, callback=self._checknested,
918 return pathutil.pathauditor(self.root, callback=self._checknested,
919 realfs=False, cached=True)
919 realfs=False, cached=True)
920
920
921 def _checknested(self, path):
921 def _checknested(self, path):
922 """Determine if path is a legal nested repository."""
922 """Determine if path is a legal nested repository."""
923 if not path.startswith(self.root):
923 if not path.startswith(self.root):
924 return False
924 return False
925 subpath = path[len(self.root) + 1:]
925 subpath = path[len(self.root) + 1:]
926 normsubpath = util.pconvert(subpath)
926 normsubpath = util.pconvert(subpath)
927
927
928 # XXX: Checking against the current working copy is wrong in
928 # XXX: Checking against the current working copy is wrong in
929 # the sense that it can reject things like
929 # the sense that it can reject things like
930 #
930 #
931 # $ hg cat -r 10 sub/x.txt
931 # $ hg cat -r 10 sub/x.txt
932 #
932 #
933 # if sub/ is no longer a subrepository in the working copy
933 # if sub/ is no longer a subrepository in the working copy
934 # parent revision.
934 # parent revision.
935 #
935 #
936 # However, it can of course also allow things that would have
936 # However, it can of course also allow things that would have
937 # been rejected before, such as the above cat command if sub/
937 # been rejected before, such as the above cat command if sub/
938 # is a subrepository now, but was a normal directory before.
938 # is a subrepository now, but was a normal directory before.
939 # The old path auditor would have rejected by mistake since it
939 # The old path auditor would have rejected by mistake since it
940 # panics when it sees sub/.hg/.
940 # panics when it sees sub/.hg/.
941 #
941 #
942 # All in all, checking against the working copy seems sensible
942 # All in all, checking against the working copy seems sensible
943 # since we want to prevent access to nested repositories on
943 # since we want to prevent access to nested repositories on
944 # the filesystem *now*.
944 # the filesystem *now*.
945 ctx = self[None]
945 ctx = self[None]
946 parts = util.splitpath(subpath)
946 parts = util.splitpath(subpath)
947 while parts:
947 while parts:
948 prefix = '/'.join(parts)
948 prefix = '/'.join(parts)
949 if prefix in ctx.substate:
949 if prefix in ctx.substate:
950 if prefix == normsubpath:
950 if prefix == normsubpath:
951 return True
951 return True
952 else:
952 else:
953 sub = ctx.sub(prefix)
953 sub = ctx.sub(prefix)
954 return sub.checknested(subpath[len(prefix) + 1:])
954 return sub.checknested(subpath[len(prefix) + 1:])
955 else:
955 else:
956 parts.pop()
956 parts.pop()
957 return False
957 return False
958
958
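
    # A minimal, self-contained sketch of the prefix walk performed by
    # _checknested() above (editor's illustration, not part of this module;
    # the plain dict stands in for ctx.substate):
    #
    #     def _nestedprefix(subpath, substate):
    #         parts = subpath.split('/')
    #         while parts:
    #             prefix = '/'.join(parts)
    #             if prefix in substate:
    #                 return prefix  # longest enclosing subrepo path
    #             parts.pop()
    #         return None
    #
    #     assert _nestedprefix('sub/dir/f.txt', {'sub': None}) == 'sub'
    #     assert _nestedprefix('elsewhere/f.txt', {'sub': None}) is None
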
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
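
    # Hedged usage sketch for the view API above (editor's addition; the
    # repository path is an assumption and must point at an existing repo):
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui.load(), '/path/to/repo')
    #     visible = repo.filtered('visible')   # hide filtered revisions
    #     assert visible.unfiltered() is repo  # round-trips to the base repo
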
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
-        source = self
-        if self.shared():
-            from . import hg
-            source = hg.sharedreposource(self)
-        return narrowspec.load(source)
+        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
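
    # Illustrative sketch (editor's addition) of how callers consume the
    # narrow API above; matchers returned by narrowmatch() are callable on
    # repo-relative paths, and the path shown is hypothetical:
    #
    #     includes, excludes = repo.narrowpats
    #     m = repo.narrowmatch()
    #     if m('some/file.txt'):
    #         pass  # the file is inside the narrow clone
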
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
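
    # Hedged usage sketch for the indexing protocol above (editor's
    # addition; the revision identifiers are illustrative):
    #
    #     wctx = repo[None]      # working directory context
    #     ctx = repo['tip']      # rev number, node, or symbol
    #     first = repo[0:5]      # list of changectx, filtered revs skipped
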
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
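
    # Hedged usage sketch for revs()/set() above (editor's addition; the
    # revset expressions are illustrative):
    #
    #     for rev in repo.revs('ancestors(%s)', 'tip'):
    #         pass  # integer revision numbers
    #     for ctx in repo.set('heads(all())'):
    #         pass  # changectx instances
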
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
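
    # Hedged usage sketch for the branch API above (editor's addition; the
    # branch names are illustrative):
    #
    #     heads = repo.branchmap()          # {branch: [branchheads]}
    #     node = repo.branchtip('default')  # tip node of that branch
    #     gone = repo.branchtip('no-such-branch', ignoremissing=True)  # None
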
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it, since it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
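
    # Illustrative sketch (editor's addition): registering a data filter.
    # The 'upper:' name and behavior are invented; a registered filter is
    # only invoked when an [encode]/[decode] command in the configuration
    # starts with its name, and it receives the (data, params, **kwargs)
    # arguments used by _filter() above:
    #
    #     def upperfilter(s, params, **kwargs):
    #         return s.upper()
    #
    #     repo.adddatafilter('upper:', upperfilter)
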
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
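
    # Hedged usage sketch for wwrite() above (editor's addition; filenames
    # and payloads are illustrative):
    #
    #     repo.wwrite('plain.txt', 'hi\n', '')       # regular file
    #     repo.wwrite('run.sh', '#!/bin/sh\n', 'x')  # executable bit set
    #     repo.wwrite('alias', 'plain.txt', 'l')     # written as a symlink
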
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
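        #
        # As an illustration (editor's sketch, not part of this module), a
        # hook could parse that file like this:
        #
        #     def parsetagchanges(data):
        #         changes = []
        #         for line in data.splitlines():
        #             action, hexnode, tagname = line.split(' ', 2)
        #             assert action in ('-R', '+A', '-M', '+M')
        #             changes.append((action, hexnode, tagname))
        #         return changes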
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
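
    # Hedged usage sketch for the transaction API above (editor's addition;
    # assumes the caller takes the store lock first, as the devel check at
    # the top of transaction() requires):
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             pass  # write to the store here
    #             tr.close()
    #         finally:
    #             tr.release()
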
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

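    # Illustrative note (assumption about the helper, which lives elsewhere
    # in this module): undofiles() relies on undoname() mapping each journal
    # file to its post-transaction counterpart, e.g. 'journal.dirstate' ->
    # 'undo.dirstate'. A rough sketch of the mapping:
    #
    #   for vfs, name in repo._journalfiles():
    #       print(name, '->', name.replace('journal', 'undo', 1))
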
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

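    # Worked example (illustrative): 'undo.desc', as written by
    # _writejournal() above via "%d\n%s\n" % (len(self), desc), holds the
    # pre-transaction repo length and the transaction name. For a repo that
    # had 42 changesets before a 'commit' transaction the file contains:
    #
    #   42
    #   commit
    #
    # so the parsing in _rollback() yields oldlen=42, desc='commit',
    # oldtip=41.
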
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

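    # Minimal sketch of the weakref pattern used above (illustrative only):
    # a closure holding a strong reference to 'self' and stored on the
    # transaction would create a repo <-> transaction reference cycle, so a
    # weak reference is taken instead and dereferenced at call time:
    #
    #   import weakref
    #   reporef = weakref.ref(self)      # does not keep the repo alive
    #   def updater(tr):
    #       repo = reporef()             # None if the repo was collected
    #       if repo is not None:
    #           repo.updatecaches(tr)
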
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

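    # Usage note (hedged sketch): callers that want every cache warmed pass
    # full=True; the 'hg debugupdatecaches' debug command is, to our reading,
    # a thin wrapper around roughly this:
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
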
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

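    # Illustrative sketch: _afterlock() defers work until the outermost lock
    # is released, and falls back to running the callback immediately when no
    # lock is held. A hypothetical caller might use it like this:
    #
    #   def notify():
    #       repo.ui.status('locks released, safe to notify observers\n')
    #   repo._afterlock(notify)
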
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

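    # Usage sketch (illustrative): to honor the ordering documented above,
    # callers acquire 'wlock' before 'lock' before opening a transaction:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('my-operation') as tr:
    #               pass  # mutate the store here
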
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

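    # Illustrative example of the copy metadata produced above: committing a
    # file 'bar' recorded as copied from 'foo' at filelog node 'crev' stores,
    # in the new filelog entry for 'bar', metadata of the form
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex-digit node>'}
    #
    # with the first file parent set to nullid, which tells readers to look
    # up the copy source instead.
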
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

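    # Usage sketch (illustrative, hypothetical values): a simple programmatic
    # commit of everything modified in the working directory could look like:
    #
    #   node = repo.commit(text='example: update docs',
    #                      user='Jane Doe <jane@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
    #
    # A None return indicates the empty-commit check above declined to
    # create a changeset.
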
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

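    # Illustrative sketch of a post-dirstate-status callback as described in
    # addpostdsstatus() (hypothetical extension code):
    #
    #   def fixup(wctx, status):
    #       # runs under wlock right after status fixups; re-read state via
    #       # wctx.repo().dirstate rather than through a cached copy
    #       wctx.repo().ui.debug('%d modified\n' % len(status.modified))
    #   repo.addpostdsstatus(fixup)
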
2452 def heads(self, start=None):
2448 def heads(self, start=None):
2453 if start is None:
2449 if start is None:
2454 cl = self.changelog
2450 cl = self.changelog
2455 headrevs = reversed(cl.headrevs())
2451 headrevs = reversed(cl.headrevs())
2456 return [cl.node(rev) for rev in headrevs]
2452 return [cl.node(rev) for rev in headrevs]
2457
2453
2458 heads = self.changelog.heads(start)
2454 heads = self.changelog.heads(start)
2459 # sort the output in rev descending order
2455 # sort the output in rev descending order
2460 return sorted(heads, key=self.changelog.rev, reverse=True)
2456 return sorted(heads, key=self.changelog.rev, reverse=True)
2461
2457
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

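    # For illustration: branchheads('default') yields what 'hg heads default'
    # shows, newest first; closed=True additionally includes heads that were
    # closed with 'hg commit --close-branch'.
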
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first parents from each node until we hit a merge or the
            # root, recording (start, end-of-run, p1, p2) for the linear run
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, sampling nodes at
            # exponentially growing distances (1, 2, 4, 8, ...) so the result
            # stays small even for long runs
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return the util.hooks instance that is run before pushing changesets.

        Each registered hook is called with a pushop argument, which carries
        repo, remote and outgoing attributes.
        """
        return util.hooks()

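    # A hypothetical extension could register a pre-push check like this
    # (the function name and the limit are illustrative only):
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           raise error.Abort('refusing to push more than 100 changesets')
    #
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)
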
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

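    # For example, pushing a bookmark over the wire eventually calls
    # something like (the hex nodes here are placeholders):
    #
    #   repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)
    #
    # which fires the prepushkey hook, performs the update via pushkey.push()
    # and schedules the pushkey hook to run once the lock is released.
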
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

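    # For instance, repo.listkeys('bookmarks') returns a dict that maps
    # bookmark names to hex nodes, wrapped by the prelistkeys/listkeys hooks.
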
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        # save the message so it can be recovered (e.g. after a failed commit)
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        # return a path relative to the cwd, suitable for user-facing hints
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest beforehand to
            # make sure the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

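# Hypothetical example of an extension wrapping newreporequirements() to add
# its own requirement for newly created repos ('exp-myfeature' and 'myext'
# are assumed names, not real requirements):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, ui, createopts=None):
#       requirements = orig(ui, createopts=createopts)
#       requirements.add('exp-myfeature')
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)
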
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

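# A hypothetical extension that understands a 'myopt' creation option would
# wrap this function to claim it, so createrepository() does not abort:
#
#   def _filterknowncreateopts(orig, ui, createopts):
#       createopts = dict(createopts)
#       createopts.pop('myopt', None)
#       return orig(ui, createopts)
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filterknowncreateopts)
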
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

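# Minimal usage sketch (the path is illustrative): create a repository on
# disk, then instantiate it through the factory above:
#
#   createrepository(ui, '/tmp/newrepo')
#   repo = makelocalrepository(ui, '/tmp/newrepo')
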
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one in which
    # all attribute lookups raise an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)