localrepo: move some vfs initialization out of __init__...
Gregory Szorc
r39724:2f9cdb5b default
@@ -1,2549 +1,2581 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 narrowspec,
47 narrowspec,
48 obsolete,
48 obsolete,
49 pathutil,
49 pathutil,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepoutil,
60 subrepoutil,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67 from .utils import (
67 from .utils import (
68 interfaceutil,
68 interfaceutil,
69 procutil,
69 procutil,
70 stringutil,
70 stringutil,
71 )
71 )
72
72
73 from .revlogutils import (
73 from .revlogutils import (
74 constants as revlogconst,
74 constants as revlogconst,
75 )
75 )
76
76
77 release = lockmod.release
77 release = lockmod.release
78 urlerr = util.urlerr
78 urlerr = util.urlerr
79 urlreq = util.urlreq
79 urlreq = util.urlreq
80
80
81 # set of (path, vfs-location) tuples. vfs-location is:
81 # set of (path, vfs-location) tuples. vfs-location is:
82 # - 'plain' for vfs relative paths
82 # - 'plain' for vfs relative paths
83 # - '' for svfs relative paths
83 # - '' for svfs relative paths
84 _cachedfiles = set()
84 _cachedfiles = set()
85
85
86 class _basefilecache(scmutil.filecache):
86 class _basefilecache(scmutil.filecache):
87 """All filecache usage on repo are done for logic that should be unfiltered
87 """All filecache usage on repo are done for logic that should be unfiltered
88 """
88 """
89 def __get__(self, repo, type=None):
89 def __get__(self, repo, type=None):
90 if repo is None:
90 if repo is None:
91 return self
91 return self
92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
92 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
93 def __set__(self, repo, value):
93 def __set__(self, repo, value):
94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
94 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
95 def __delete__(self, repo):
95 def __delete__(self, repo):
96 return super(_basefilecache, self).__delete__(repo.unfiltered())
96 return super(_basefilecache, self).__delete__(repo.unfiltered())
97
97
98 class repofilecache(_basefilecache):
98 class repofilecache(_basefilecache):
99 """filecache for files in .hg but outside of .hg/store"""
99 """filecache for files in .hg but outside of .hg/store"""
100 def __init__(self, *paths):
100 def __init__(self, *paths):
101 super(repofilecache, self).__init__(*paths)
101 super(repofilecache, self).__init__(*paths)
102 for path in paths:
102 for path in paths:
103 _cachedfiles.add((path, 'plain'))
103 _cachedfiles.add((path, 'plain'))
104
104
105 def join(self, obj, fname):
105 def join(self, obj, fname):
106 return obj.vfs.join(fname)
106 return obj.vfs.join(fname)
107
107
108 class storecache(_basefilecache):
108 class storecache(_basefilecache):
109 """filecache for files in the store"""
109 """filecache for files in the store"""
110 def __init__(self, *paths):
110 def __init__(self, *paths):
111 super(storecache, self).__init__(*paths)
111 super(storecache, self).__init__(*paths)
112 for path in paths:
112 for path in paths:
113 _cachedfiles.add((path, ''))
113 _cachedfiles.add((path, ''))
114
114
115 def join(self, obj, fname):
115 def join(self, obj, fname):
116 return obj.sjoin(fname)
116 return obj.sjoin(fname)
117
117
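# Illustrative sketch, not part of this change: these descriptors declare
# lazily computed repository properties that are invalidated when the named
# files change on disk. repofilecache resolves names through repo.vfs (.hg/),
# storecache through repo.sjoin (.hg/store/). Simplified from usage later in
# this module:
class _filecachedemo(object):
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs)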
118 def isfilecached(repo, name):
118 def isfilecached(repo, name):
119 """check if a repo has already cached "name" filecache-ed property
119 """check if a repo has already cached "name" filecache-ed property
120
120
121 This returns (cachedobj-or-None, iscached) tuple.
121 This returns (cachedobj-or-None, iscached) tuple.
122 """
122 """
123 cacheentry = repo.unfiltered()._filecache.get(name, None)
123 cacheentry = repo.unfiltered()._filecache.get(name, None)
124 if not cacheentry:
124 if not cacheentry:
125 return None, False
125 return None, False
126 return cacheentry.obj, True
126 return cacheentry.obj, True
127
127
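# Hedged usage sketch: isfilecached() lets callers peek at a filecache-ed
# property without forcing its potentially expensive computation.
def _peekchangelog(repo):
    cl, cached = isfilecached(repo, 'changelog')
    if cached:
        repo.ui.debug('changelog already loaded: %d revisions\n' % len(cl))
    return cached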
128 class unfilteredpropertycache(util.propertycache):
128 class unfilteredpropertycache(util.propertycache):
129 """propertycache that apply to unfiltered repo only"""
129 """propertycache that apply to unfiltered repo only"""
130
130
131 def __get__(self, repo, type=None):
131 def __get__(self, repo, type=None):
132 unfi = repo.unfiltered()
132 unfi = repo.unfiltered()
133 if unfi is repo:
133 if unfi is repo:
134 return super(unfilteredpropertycache, self).__get__(unfi)
134 return super(unfilteredpropertycache, self).__get__(unfi)
135 return getattr(unfi, self.name)
135 return getattr(unfi, self.name)
136
136
137 class filteredpropertycache(util.propertycache):
137 class filteredpropertycache(util.propertycache):
138 """propertycache that must take filtering in account"""
138 """propertycache that must take filtering in account"""
139
139
140 def cachevalue(self, obj, value):
140 def cachevalue(self, obj, value):
141 object.__setattr__(obj, self.name, value)
141 object.__setattr__(obj, self.name, value)
142
142
143
143
144 def hasunfilteredcache(repo, name):
144 def hasunfilteredcache(repo, name):
145 """check if a repo has an unfilteredpropertycache value for <name>"""
145 """check if a repo has an unfilteredpropertycache value for <name>"""
146 return name in vars(repo.unfiltered())
146 return name in vars(repo.unfiltered())
147
147
148 def unfilteredmethod(orig):
148 def unfilteredmethod(orig):
149 """decorate method that always need to be run on unfiltered version"""
149 """decorate method that always need to be run on unfiltered version"""
150 def wrapper(repo, *args, **kwargs):
150 def wrapper(repo, *args, **kwargs):
151 return orig(repo.unfiltered(), *args, **kwargs)
151 return orig(repo.unfiltered(), *args, **kwargs)
152 return wrapper
152 return wrapper
153
153
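# Hedged sketch (class and method names hypothetical): unfilteredmethod is for
# operations that must always see every revision, e.g. history-destroying code.
class _unfiltereddemo(object):
    def unfiltered(self):
        return self

    @unfilteredmethod
    def destroylike(self):
        # 'self' is guaranteed to be the unfiltered repository here
        return self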
154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
154 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
155 'unbundle'}
155 'unbundle'}
156 legacycaps = moderncaps.union({'changegroupsubset'})
156 legacycaps = moderncaps.union({'changegroupsubset'})
157
157
158 @interfaceutil.implementer(repository.ipeercommandexecutor)
158 @interfaceutil.implementer(repository.ipeercommandexecutor)
159 class localcommandexecutor(object):
159 class localcommandexecutor(object):
160 def __init__(self, peer):
160 def __init__(self, peer):
161 self._peer = peer
161 self._peer = peer
162 self._sent = False
162 self._sent = False
163 self._closed = False
163 self._closed = False
164
164
165 def __enter__(self):
165 def __enter__(self):
166 return self
166 return self
167
167
168 def __exit__(self, exctype, excvalue, exctb):
168 def __exit__(self, exctype, excvalue, exctb):
169 self.close()
169 self.close()
170
170
171 def callcommand(self, command, args):
171 def callcommand(self, command, args):
172 if self._sent:
172 if self._sent:
173 raise error.ProgrammingError('callcommand() cannot be used after '
173 raise error.ProgrammingError('callcommand() cannot be used after '
174 'sendcommands()')
174 'sendcommands()')
175
175
176 if self._closed:
176 if self._closed:
177 raise error.ProgrammingError('callcommand() cannot be used after '
177 raise error.ProgrammingError('callcommand() cannot be used after '
178 'close()')
178 'close()')
179
179
180 # We don't need to support anything fancy. Just call the named
180 # We don't need to support anything fancy. Just call the named
181 # method on the peer and return a resolved future.
181 # method on the peer and return a resolved future.
182 fn = getattr(self._peer, pycompat.sysstr(command))
182 fn = getattr(self._peer, pycompat.sysstr(command))
183
183
184 f = pycompat.futures.Future()
184 f = pycompat.futures.Future()
185
185
186 try:
186 try:
187 result = fn(**pycompat.strkwargs(args))
187 result = fn(**pycompat.strkwargs(args))
188 except Exception:
188 except Exception:
189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
189 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
190 else:
190 else:
191 f.set_result(result)
191 f.set_result(result)
192
192
193 return f
193 return f
194
194
195 def sendcommands(self):
195 def sendcommands(self):
196 self._sent = True
196 self._sent = True
197
197
198 def close(self):
198 def close(self):
199 self._closed = True
199 self._closed = True
200
200
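# Hedged usage sketch: the executor mirrors the wire-protocol peer API, so
# callers drive a local peer the same way as a remote one. The futures it
# returns are already resolved because everything runs in-process.
def _lookuptip(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand('lookup', {'key': 'tip'})
    return f.result()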
201 @interfaceutil.implementer(repository.ipeercommands)
201 @interfaceutil.implementer(repository.ipeercommands)
202 class localpeer(repository.peer):
202 class localpeer(repository.peer):
203 '''peer for a local repo; reflects only the most recent API'''
203 '''peer for a local repo; reflects only the most recent API'''
204
204
205 def __init__(self, repo, caps=None):
205 def __init__(self, repo, caps=None):
206 super(localpeer, self).__init__()
206 super(localpeer, self).__init__()
207
207
208 if caps is None:
208 if caps is None:
209 caps = moderncaps.copy()
209 caps = moderncaps.copy()
210 self._repo = repo.filtered('served')
210 self._repo = repo.filtered('served')
211 self.ui = repo.ui
211 self.ui = repo.ui
212 self._caps = repo._restrictcapabilities(caps)
212 self._caps = repo._restrictcapabilities(caps)
213
213
214 # Begin of _basepeer interface.
214 # Begin of _basepeer interface.
215
215
216 def url(self):
216 def url(self):
217 return self._repo.url()
217 return self._repo.url()
218
218
219 def local(self):
219 def local(self):
220 return self._repo
220 return self._repo
221
221
222 def peer(self):
222 def peer(self):
223 return self
223 return self
224
224
225 def canpush(self):
225 def canpush(self):
226 return True
226 return True
227
227
228 def close(self):
228 def close(self):
229 self._repo.close()
229 self._repo.close()
230
230
231 # End of _basepeer interface.
231 # End of _basepeer interface.
232
232
233 # Begin of _basewirecommands interface.
233 # Begin of _basewirecommands interface.
234
234
235 def branchmap(self):
235 def branchmap(self):
236 return self._repo.branchmap()
236 return self._repo.branchmap()
237
237
238 def capabilities(self):
238 def capabilities(self):
239 return self._caps
239 return self._caps
240
240
241 def clonebundles(self):
241 def clonebundles(self):
242 return self._repo.tryread('clonebundles.manifest')
242 return self._repo.tryread('clonebundles.manifest')
243
243
244 def debugwireargs(self, one, two, three=None, four=None, five=None):
244 def debugwireargs(self, one, two, three=None, four=None, five=None):
245 """Used to test argument passing over the wire"""
245 """Used to test argument passing over the wire"""
246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
246 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
247 pycompat.bytestr(four),
247 pycompat.bytestr(four),
248 pycompat.bytestr(five))
248 pycompat.bytestr(five))
249
249
250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
250 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
251 **kwargs):
251 **kwargs):
252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
252 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
253 common=common, bundlecaps=bundlecaps,
253 common=common, bundlecaps=bundlecaps,
254 **kwargs)[1]
254 **kwargs)[1]
255 cb = util.chunkbuffer(chunks)
255 cb = util.chunkbuffer(chunks)
256
256
257 if exchange.bundle2requested(bundlecaps):
257 if exchange.bundle2requested(bundlecaps):
258 # When requesting a bundle2, getbundle returns a stream to make the
258 # When requesting a bundle2, getbundle returns a stream to make the
259 # wire level function happier. We need to build a proper object
259 # wire level function happier. We need to build a proper object
260 # from it in local peer.
260 # from it in local peer.
261 return bundle2.getunbundler(self.ui, cb)
261 return bundle2.getunbundler(self.ui, cb)
262 else:
262 else:
263 return changegroup.getunbundler('01', cb, None)
263 return changegroup.getunbundler('01', cb, None)
264
264
265 def heads(self):
265 def heads(self):
266 return self._repo.heads()
266 return self._repo.heads()
267
267
268 def known(self, nodes):
268 def known(self, nodes):
269 return self._repo.known(nodes)
269 return self._repo.known(nodes)
270
270
271 def listkeys(self, namespace):
271 def listkeys(self, namespace):
272 return self._repo.listkeys(namespace)
272 return self._repo.listkeys(namespace)
273
273
274 def lookup(self, key):
274 def lookup(self, key):
275 return self._repo.lookup(key)
275 return self._repo.lookup(key)
276
276
277 def pushkey(self, namespace, key, old, new):
277 def pushkey(self, namespace, key, old, new):
278 return self._repo.pushkey(namespace, key, old, new)
278 return self._repo.pushkey(namespace, key, old, new)
279
279
280 def stream_out(self):
280 def stream_out(self):
281 raise error.Abort(_('cannot perform stream clone against local '
281 raise error.Abort(_('cannot perform stream clone against local '
282 'peer'))
282 'peer'))
283
283
284 def unbundle(self, bundle, heads, url):
284 def unbundle(self, bundle, heads, url):
285 """apply a bundle on a repo
285 """apply a bundle on a repo
286
286
287 This function handles the repo locking itself."""
287 This function handles the repo locking itself."""
288 try:
288 try:
289 try:
289 try:
290 bundle = exchange.readbundle(self.ui, bundle, None)
290 bundle = exchange.readbundle(self.ui, bundle, None)
291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
291 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
292 if util.safehasattr(ret, 'getchunks'):
292 if util.safehasattr(ret, 'getchunks'):
293 # This is a bundle20 object, turn it into an unbundler.
293 # This is a bundle20 object, turn it into an unbundler.
294 # This little dance should be dropped eventually when the
294 # This little dance should be dropped eventually when the
295 # API is finally improved.
295 # API is finally improved.
296 stream = util.chunkbuffer(ret.getchunks())
296 stream = util.chunkbuffer(ret.getchunks())
297 ret = bundle2.getunbundler(self.ui, stream)
297 ret = bundle2.getunbundler(self.ui, stream)
298 return ret
298 return ret
299 except Exception as exc:
299 except Exception as exc:
300 # If the exception contains output salvaged from a bundle2
300 # If the exception contains output salvaged from a bundle2
301 # reply, we need to make sure it is printed before continuing
301 # reply, we need to make sure it is printed before continuing
302 # to fail. So we build a bundle2 with such output and consume
302 # to fail. So we build a bundle2 with such output and consume
303 # it directly.
303 # it directly.
304 #
304 #
305 # This is not very elegant but allows a "simple" solution for
305 # This is not very elegant but allows a "simple" solution for
306 # issue4594
306 # issue4594
307 output = getattr(exc, '_bundle2salvagedoutput', ())
307 output = getattr(exc, '_bundle2salvagedoutput', ())
308 if output:
308 if output:
309 bundler = bundle2.bundle20(self._repo.ui)
309 bundler = bundle2.bundle20(self._repo.ui)
310 for out in output:
310 for out in output:
311 bundler.addpart(out)
311 bundler.addpart(out)
312 stream = util.chunkbuffer(bundler.getchunks())
312 stream = util.chunkbuffer(bundler.getchunks())
313 b = bundle2.getunbundler(self.ui, stream)
313 b = bundle2.getunbundler(self.ui, stream)
314 bundle2.processbundle(self._repo, b)
314 bundle2.processbundle(self._repo, b)
315 raise
315 raise
316 except error.PushRaced as exc:
316 except error.PushRaced as exc:
317 raise error.ResponseError(_('push failed:'),
317 raise error.ResponseError(_('push failed:'),
318 stringutil.forcebytestr(exc))
318 stringutil.forcebytestr(exc))
319
319
320 # End of _basewirecommands interface.
320 # End of _basewirecommands interface.
321
321
322 # Begin of peer interface.
322 # Begin of peer interface.
323
323
324 def commandexecutor(self):
324 def commandexecutor(self):
325 return localcommandexecutor(self)
325 return localcommandexecutor(self)
326
326
327 # End of peer interface.
327 # End of peer interface.
328
328
329 @interfaceutil.implementer(repository.ipeerlegacycommands)
329 @interfaceutil.implementer(repository.ipeerlegacycommands)
330 class locallegacypeer(localpeer):
330 class locallegacypeer(localpeer):
331 '''peer extension which implements legacy methods too; used for tests with
331 '''peer extension which implements legacy methods too; used for tests with
332 restricted capabilities'''
332 restricted capabilities'''
333
333
334 def __init__(self, repo):
334 def __init__(self, repo):
335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
335 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
336
336
337 # Begin of baselegacywirecommands interface.
337 # Begin of baselegacywirecommands interface.
338
338
339 def between(self, pairs):
339 def between(self, pairs):
340 return self._repo.between(pairs)
340 return self._repo.between(pairs)
341
341
342 def branches(self, nodes):
342 def branches(self, nodes):
343 return self._repo.branches(nodes)
343 return self._repo.branches(nodes)
344
344
345 def changegroup(self, nodes, source):
345 def changegroup(self, nodes, source):
346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
346 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
347 missingheads=self._repo.heads())
347 missingheads=self._repo.heads())
348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
348 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
349
349
350 def changegroupsubset(self, bases, heads, source):
350 def changegroupsubset(self, bases, heads, source):
351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
351 outgoing = discovery.outgoing(self._repo, missingroots=bases,
352 missingheads=heads)
352 missingheads=heads)
353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
353 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
354
354
355 # End of baselegacywirecommands interface.
355 # End of baselegacywirecommands interface.
356
356
357 # Increment the sub-version when the revlog v2 format changes to lock out old
357 # Increment the sub-version when the revlog v2 format changes to lock out old
358 # clients.
358 # clients.
359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
359 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
360
360
361 # A repository with the sparserevlog feature will have delta chains that
361 # A repository with the sparserevlog feature will have delta chains that
362 # can spread over a larger span. Sparse reading cuts these large spans into
362 # can spread over a larger span. Sparse reading cuts these large spans into
363 # pieces, so that each piece isn't too big.
363 # pieces, so that each piece isn't too big.
364 # Without the sparserevlog capability, reading from the repository could use
364 # Without the sparserevlog capability, reading from the repository could use
365 # huge amounts of memory, because the whole span would be read at once,
365 # huge amounts of memory, because the whole span would be read at once,
366 # including all the intermediate revisions that aren't pertinent for the chain.
366 # including all the intermediate revisions that aren't pertinent for the chain.
367 # This is why once a repository has enabled sparse-read, it becomes required.
367 # This is why once a repository has enabled sparse-read, it becomes required.
368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
368 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
369
369
370 # Functions receiving (ui, features) that extensions can register to impact
370 # Functions receiving (ui, features) that extensions can register to impact
371 # the ability to load repositories with custom requirements. Only
371 # the ability to load repositories with custom requirements. Only
372 # functions defined in loaded extensions are called.
372 # functions defined in loaded extensions are called.
373 #
373 #
374 # The function receives a set of requirement strings that the repository
374 # The function receives a set of requirement strings that the repository
375 # is capable of opening. Functions will typically add elements to the
375 # is capable of opening. Functions will typically add elements to the
376 # set to reflect that the extension knows how to handle those requirements.
376 # set to reflect that the extension knows how to handle those requirements.
377 featuresetupfuncs = set()
377 featuresetupfuncs = set()
378
378
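# Hedged sketch of the extension-side registration this hook expects (the
# requirement string and function names are hypothetical); only functions
# defined in loaded extension modules are actually invoked:
def _examplefeaturesetup(ui, features):
    features.add('exp-example-requirement')

def _exampleuisetup(ui):
    featuresetupfuncs.add(_examplefeaturesetup)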
379 def makelocalrepository(ui, path, intents=None):
379 def makelocalrepository(ui, path, intents=None):
380 """Create a local repository object.
380 """Create a local repository object.
381
381
382 Given arguments needed to construct a local repository, this function
382 Given arguments needed to construct a local repository, this function
383 derives a type suitable for representing that repository and returns an
383 derives a type suitable for representing that repository and returns an
384 instance of it.
384 instance of it.
385
385
386 The returned object conforms to the ``repository.completelocalrepository``
386 The returned object conforms to the ``repository.completelocalrepository``
387 interface.
387 interface.
388 """
388 """
389 return localrepository(ui, path, intents=intents)
389 # Working directory VFS rooted at repository root.
390 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
391
392 # Main VFS for .hg/ directory.
393 hgpath = wdirvfs.join(b'.hg')
394 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
395
396 return localrepository(
397 ui, path,
398 wdirvfs=wdirvfs,
399 hgvfs=hgvfs,
400 intents=intents)
390
401
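# Hedged usage sketch (the path is a placeholder): most callers reach this
# factory via hg.repository() or localrepo.instance(), but a direct call looks
# like this. The wdirvfs/hgvfs built above end up as repo.wvfs and repo.vfs.
def _openexample(ui):
    repo = makelocalrepository(ui, b'/path/to/repo')
    assert repo.root == repo.wvfs.base   # rooted at the working directory
    assert repo.path == repo.vfs.base    # rooted at <root>/.hg
    return repo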
391 @interfaceutil.implementer(repository.completelocalrepository)
402 @interfaceutil.implementer(repository.completelocalrepository)
392 class localrepository(object):
403 class localrepository(object):
393
404
394 # obsolete experimental requirements:
405 # obsolete experimental requirements:
395 # - manifestv2: An experimental new manifest format that allowed
406 # - manifestv2: An experimental new manifest format that allowed
396 # for stem compression of long paths. Experiment ended up not
407 # for stem compression of long paths. Experiment ended up not
397 # being successful (repository sizes went up due to worse delta
408 # being successful (repository sizes went up due to worse delta
398 # chains), and the code was deleted in 4.6.
409 # chains), and the code was deleted in 4.6.
399 supportedformats = {
410 supportedformats = {
400 'revlogv1',
411 'revlogv1',
401 'generaldelta',
412 'generaldelta',
402 'treemanifest',
413 'treemanifest',
403 REVLOGV2_REQUIREMENT,
414 REVLOGV2_REQUIREMENT,
404 SPARSEREVLOG_REQUIREMENT,
415 SPARSEREVLOG_REQUIREMENT,
405 }
416 }
406 _basesupported = supportedformats | {
417 _basesupported = supportedformats | {
407 'store',
418 'store',
408 'fncache',
419 'fncache',
409 'shared',
420 'shared',
410 'relshared',
421 'relshared',
411 'dotencode',
422 'dotencode',
412 'exp-sparse',
423 'exp-sparse',
413 'internal-phase'
424 'internal-phase'
414 }
425 }
415 openerreqs = {
426 openerreqs = {
416 'revlogv1',
427 'revlogv1',
417 'generaldelta',
428 'generaldelta',
418 'treemanifest',
429 'treemanifest',
419 }
430 }
420
431
421 # list of prefixes for files which can be written without 'wlock'
432 # list of prefixes for files which can be written without 'wlock'
422 # Extensions should extend this list when needed
433 # Extensions should extend this list when needed
423 _wlockfreeprefix = {
434 _wlockfreeprefix = {
424 # We might consider requiring 'wlock' for the next
435 # We might consider requiring 'wlock' for the next
425 # two, but pretty much all the existing code assumes
436 # two, but pretty much all the existing code assumes
426 # wlock is not needed so we keep them excluded for
437 # wlock is not needed so we keep them excluded for
427 # now.
438 # now.
428 'hgrc',
439 'hgrc',
429 'requires',
440 'requires',
430 # XXX cache is a complicated business; someone
441 # XXX cache is a complicated business; someone
431 # should investigate this in depth at some point
442 # should investigate this in depth at some point
432 'cache/',
443 'cache/',
433 # XXX shouldn't be dirstate covered by the wlock?
444 # XXX shouldn't be dirstate covered by the wlock?
434 'dirstate',
445 'dirstate',
435 # XXX bisect was still a bit too messy at the time
446 # XXX bisect was still a bit too messy at the time
436 # this changeset was introduced. Someone should fix
447 # this changeset was introduced. Someone should fix
437 # the remaining bit and drop this line
448 # the remaining bit and drop this line
438 'bisect.state',
449 'bisect.state',
439 }
450 }
440
451
441 def __init__(self, baseui, path, intents=None):
452 def __init__(self, baseui, origroot, wdirvfs, hgvfs, intents=None):
442 """Create a new local repository instance.
453 """Create a new local repository instance.
443
454
444 Most callers should use ``hg.repository()`` or ``localrepo.instance()``
455 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
445 for obtaining a new repository object.
456 or ``localrepo.makelocalrepository()`` for obtaining a new repository
457 object.
458
459 Arguments:
460
461 baseui
462 ``ui.ui`` instance to use. A copy will be made (since new config
463 options may be loaded into it).
464
465 origroot
466 ``bytes`` path to working directory root of this repository.
467
468 wdirvfs
469 ``vfs.vfs`` rooted at the working directory.
470
471 hgvfs
472 ``vfs.vfs`` rooted at .hg/
473
474 intents
475 ``set`` of system strings indicating what this repo will be used
476 for.
446 """
477 """
478 self.baseui = baseui
479 self.ui = baseui.copy()
480 self.ui.copy = baseui.copy # prevent copying repo configuration
481
482 self.origroot = origroot
483 # vfs rooted at working directory.
484 self.wvfs = wdirvfs
485 self.root = wdirvfs.base
486 # vfs rooted at .hg/. Used to access most non-store paths.
487 self.vfs = hgvfs
488 self.path = hgvfs.base
447
489
448 self.requirements = set()
490 self.requirements = set()
449 self.filtername = None
491 self.filtername = None
450 # wvfs: rooted at the repository root, used to access the working copy
451 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
452 # vfs: rooted at .hg, used to access repo files outside of .hg/store
453 self.vfs = None
454 # svfs: usually rooted at .hg/store, used to access repository history
492 # svfs: usually rooted at .hg/store, used to access repository history
455 # If this is a shared repository, this vfs may point to another
493 # If this is a shared repository, this vfs may point to another
456 # repository's .hg/store directory.
494 # repository's .hg/store directory.
457 self.svfs = None
495 self.svfs = None
458 self.root = self.wvfs.base
496
459 self.path = self.wvfs.join(".hg")
460 self.origroot = path
461 self.baseui = baseui
462 self.ui = baseui.copy()
463 self.ui.copy = baseui.copy # prevent copying repo configuration
464 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
465 if (self.ui.configbool('devel', 'all-warnings') or
497 if (self.ui.configbool('devel', 'all-warnings') or
466 self.ui.configbool('devel', 'check-locks')):
498 self.ui.configbool('devel', 'check-locks')):
467 self.vfs.audit = self._getvfsward(self.vfs.audit)
499 self.vfs.audit = self._getvfsward(self.vfs.audit)
468 # A list of callbacks to shape the phase if no data were found.
500 # A list of callbacks to shape the phase if no data were found.
469 # Callbacks are in the form: func(repo, roots) --> processed root.
501 # Callbacks are in the form: func(repo, roots) --> processed root.
470 # This list is to be filled by extensions during repo setup
502 # This list is to be filled by extensions during repo setup
471 self._phasedefaults = []
503 self._phasedefaults = []
472 try:
504 try:
473 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
505 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
474 self._loadextensions()
506 self._loadextensions()
475 except IOError:
507 except IOError:
476 pass
508 pass
477
509
478 if featuresetupfuncs:
510 if featuresetupfuncs:
479 self.supported = set(self._basesupported) # use private copy
511 self.supported = set(self._basesupported) # use private copy
480 extmods = set(m.__name__ for n, m
512 extmods = set(m.__name__ for n, m
481 in extensions.extensions(self.ui))
513 in extensions.extensions(self.ui))
482 for setupfunc in featuresetupfuncs:
514 for setupfunc in featuresetupfuncs:
483 if setupfunc.__module__ in extmods:
515 if setupfunc.__module__ in extmods:
484 setupfunc(self.ui, self.supported)
516 setupfunc(self.ui, self.supported)
485 else:
517 else:
486 self.supported = self._basesupported
518 self.supported = self._basesupported
487 color.setup(self.ui)
519 color.setup(self.ui)
488
520
489 # Add compression engines.
521 # Add compression engines.
490 for name in util.compengines:
522 for name in util.compengines:
491 engine = util.compengines[name]
523 engine = util.compengines[name]
492 if engine.revlogheader():
524 if engine.revlogheader():
493 self.supported.add('exp-compression-%s' % name)
525 self.supported.add('exp-compression-%s' % name)
494
526
495 if not self.vfs.isdir():
527 if not self.vfs.isdir():
496 try:
528 try:
497 self.vfs.stat()
529 self.vfs.stat()
498 except OSError as inst:
530 except OSError as inst:
499 if inst.errno != errno.ENOENT:
531 if inst.errno != errno.ENOENT:
500 raise
532 raise
501 raise error.RepoError(_("repository %s not found") % path)
533 raise error.RepoError(_("repository %s not found") % origroot)
502 else:
534 else:
503 try:
535 try:
504 self.requirements = scmutil.readrequires(
536 self.requirements = scmutil.readrequires(
505 self.vfs, self.supported)
537 self.vfs, self.supported)
506 except IOError as inst:
538 except IOError as inst:
507 if inst.errno != errno.ENOENT:
539 if inst.errno != errno.ENOENT:
508 raise
540 raise
509
541
510 cachepath = self.vfs.join('cache')
542 cachepath = self.vfs.join('cache')
511 self.sharedpath = self.path
543 self.sharedpath = self.path
512 try:
544 try:
513 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
545 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
514 if 'relshared' in self.requirements:
546 if 'relshared' in self.requirements:
515 sharedpath = self.vfs.join(sharedpath)
547 sharedpath = self.vfs.join(sharedpath)
516 vfs = vfsmod.vfs(sharedpath, realpath=True)
548 vfs = vfsmod.vfs(sharedpath, realpath=True)
517 cachepath = vfs.join('cache')
549 cachepath = vfs.join('cache')
518 s = vfs.base
550 s = vfs.base
519 if not vfs.exists():
551 if not vfs.exists():
520 raise error.RepoError(
552 raise error.RepoError(
521 _('.hg/sharedpath points to nonexistent directory %s') % s)
553 _('.hg/sharedpath points to nonexistent directory %s') % s)
522 self.sharedpath = s
554 self.sharedpath = s
523 except IOError as inst:
555 except IOError as inst:
524 if inst.errno != errno.ENOENT:
556 if inst.errno != errno.ENOENT:
525 raise
557 raise
526
558
527 if 'exp-sparse' in self.requirements and not sparse.enabled:
559 if 'exp-sparse' in self.requirements and not sparse.enabled:
528 raise error.RepoError(_('repository is using sparse feature but '
560 raise error.RepoError(_('repository is using sparse feature but '
529 'sparse is not enabled; enable the '
561 'sparse is not enabled; enable the '
530 '"sparse" extensions to access'))
562 '"sparse" extensions to access'))
531
563
532 self.store = store.store(
564 self.store = store.store(
533 self.requirements, self.sharedpath,
565 self.requirements, self.sharedpath,
534 lambda base: vfsmod.vfs(base, cacheaudited=True))
566 lambda base: vfsmod.vfs(base, cacheaudited=True))
535 self.spath = self.store.path
567 self.spath = self.store.path
536 self.svfs = self.store.vfs
568 self.svfs = self.store.vfs
537 self.sjoin = self.store.join
569 self.sjoin = self.store.join
538 self.vfs.createmode = self.store.createmode
570 self.vfs.createmode = self.store.createmode
539 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
571 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
540 self.cachevfs.createmode = self.store.createmode
572 self.cachevfs.createmode = self.store.createmode
541 if (self.ui.configbool('devel', 'all-warnings') or
573 if (self.ui.configbool('devel', 'all-warnings') or
542 self.ui.configbool('devel', 'check-locks')):
574 self.ui.configbool('devel', 'check-locks')):
543 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
575 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
544 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
576 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
545 else: # standard vfs
577 else: # standard vfs
546 self.svfs.audit = self._getsvfsward(self.svfs.audit)
578 self.svfs.audit = self._getsvfsward(self.svfs.audit)
547 self._applyopenerreqs()
579 self._applyopenerreqs()
548
580
549 self._dirstatevalidatewarned = False
581 self._dirstatevalidatewarned = False
550
582
551 self._branchcaches = {}
583 self._branchcaches = {}
552 self._revbranchcache = None
584 self._revbranchcache = None
553 self._filterpats = {}
585 self._filterpats = {}
554 self._datafilters = {}
586 self._datafilters = {}
555 self._transref = self._lockref = self._wlockref = None
587 self._transref = self._lockref = self._wlockref = None
556
588
557 # A cache for various files under .hg/ that tracks file changes,
589 # A cache for various files under .hg/ that tracks file changes,
558 # (used by the filecache decorator)
590 # (used by the filecache decorator)
559 #
591 #
560 # Maps a property name to its util.filecacheentry
592 # Maps a property name to its util.filecacheentry
561 self._filecache = {}
593 self._filecache = {}
562
594
563 # hold sets of revisions to be filtered
595 # hold sets of revisions to be filtered
564 # should be cleared when something might have changed the filter value:
596 # should be cleared when something might have changed the filter value:
565 # - new changesets,
597 # - new changesets,
566 # - phase change,
598 # - phase change,
567 # - new obsolescence marker,
599 # - new obsolescence marker,
568 # - working directory parent change,
600 # - working directory parent change,
569 # - bookmark changes
601 # - bookmark changes
570 self.filteredrevcache = {}
602 self.filteredrevcache = {}
571
603
572 # post-dirstate-status hooks
604 # post-dirstate-status hooks
573 self._postdsstatus = []
605 self._postdsstatus = []
574
606
575 # generic mapping between names and nodes
607 # generic mapping between names and nodes
576 self.names = namespaces.namespaces()
608 self.names = namespaces.namespaces()
577
609
578 # Key to signature value.
610 # Key to signature value.
579 self._sparsesignaturecache = {}
611 self._sparsesignaturecache = {}
580 # Signature to cached matcher instance.
612 # Signature to cached matcher instance.
581 self._sparsematchercache = {}
613 self._sparsematchercache = {}
582
614
583 def _getvfsward(self, origfunc):
615 def _getvfsward(self, origfunc):
584 """build a ward for self.vfs"""
616 """build a ward for self.vfs"""
585 rref = weakref.ref(self)
617 rref = weakref.ref(self)
586 def checkvfs(path, mode=None):
618 def checkvfs(path, mode=None):
587 ret = origfunc(path, mode=mode)
619 ret = origfunc(path, mode=mode)
588 repo = rref()
620 repo = rref()
589 if (repo is None
621 if (repo is None
590 or not util.safehasattr(repo, '_wlockref')
622 or not util.safehasattr(repo, '_wlockref')
591 or not util.safehasattr(repo, '_lockref')):
623 or not util.safehasattr(repo, '_lockref')):
592 return
624 return
593 if mode in (None, 'r', 'rb'):
625 if mode in (None, 'r', 'rb'):
594 return
626 return
595 if path.startswith(repo.path):
627 if path.startswith(repo.path):
596 # truncate name relative to the repository (.hg)
628 # truncate name relative to the repository (.hg)
597 path = path[len(repo.path) + 1:]
629 path = path[len(repo.path) + 1:]
598 if path.startswith('cache/'):
630 if path.startswith('cache/'):
599 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
631 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
600 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
632 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
601 if path.startswith('journal.'):
633 if path.startswith('journal.'):
602 # journal is covered by 'lock'
634 # journal is covered by 'lock'
603 if repo._currentlock(repo._lockref) is None:
635 if repo._currentlock(repo._lockref) is None:
604 repo.ui.develwarn('write with no lock: "%s"' % path,
636 repo.ui.develwarn('write with no lock: "%s"' % path,
605 stacklevel=2, config='check-locks')
637 stacklevel=2, config='check-locks')
606 elif repo._currentlock(repo._wlockref) is None:
638 elif repo._currentlock(repo._wlockref) is None:
607 # rest of vfs files are covered by 'wlock'
639 # rest of vfs files are covered by 'wlock'
608 #
640 #
609 # exclude special files
641 # exclude special files
610 for prefix in self._wlockfreeprefix:
642 for prefix in self._wlockfreeprefix:
611 if path.startswith(prefix):
643 if path.startswith(prefix):
612 return
644 return
613 repo.ui.develwarn('write with no wlock: "%s"' % path,
645 repo.ui.develwarn('write with no wlock: "%s"' % path,
614 stacklevel=2, config='check-locks')
646 stacklevel=2, config='check-locks')
615 return ret
647 return ret
616 return checkvfs
648 return checkvfs
617
649
618 def _getsvfsward(self, origfunc):
650 def _getsvfsward(self, origfunc):
619 """build a ward for self.svfs"""
651 """build a ward for self.svfs"""
620 rref = weakref.ref(self)
652 rref = weakref.ref(self)
621 def checksvfs(path, mode=None):
653 def checksvfs(path, mode=None):
622 ret = origfunc(path, mode=mode)
654 ret = origfunc(path, mode=mode)
623 repo = rref()
655 repo = rref()
624 if repo is None or not util.safehasattr(repo, '_lockref'):
656 if repo is None or not util.safehasattr(repo, '_lockref'):
625 return
657 return
626 if mode in (None, 'r', 'rb'):
658 if mode in (None, 'r', 'rb'):
627 return
659 return
628 if path.startswith(repo.sharedpath):
660 if path.startswith(repo.sharedpath):
629 # truncate name relative to the repository (.hg)
661 # truncate name relative to the repository (.hg)
630 path = path[len(repo.sharedpath) + 1:]
662 path = path[len(repo.sharedpath) + 1:]
631 if repo._currentlock(repo._lockref) is None:
663 if repo._currentlock(repo._lockref) is None:
632 repo.ui.develwarn('write with no lock: "%s"' % path,
664 repo.ui.develwarn('write with no lock: "%s"' % path,
633 stacklevel=3)
665 stacklevel=3)
634 return ret
666 return ret
635 return checksvfs
667 return checksvfs
636
668
637 def close(self):
669 def close(self):
638 self._writecaches()
670 self._writecaches()
639
671
640 def _loadextensions(self):
672 def _loadextensions(self):
641 extensions.loadall(self.ui)
673 extensions.loadall(self.ui)
642
674
643 def _writecaches(self):
675 def _writecaches(self):
644 if self._revbranchcache:
676 if self._revbranchcache:
645 self._revbranchcache.write()
677 self._revbranchcache.write()
646
678
647 def _restrictcapabilities(self, caps):
679 def _restrictcapabilities(self, caps):
648 if self.ui.configbool('experimental', 'bundle2-advertise'):
680 if self.ui.configbool('experimental', 'bundle2-advertise'):
649 caps = set(caps)
681 caps = set(caps)
650 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
682 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
651 role='client'))
683 role='client'))
652 caps.add('bundle2=' + urlreq.quote(capsblob))
684 caps.add('bundle2=' + urlreq.quote(capsblob))
653 return caps
685 return caps
654
686
655 def _applyopenerreqs(self):
687 def _applyopenerreqs(self):
656 self.svfs.options = dict((r, 1) for r in self.requirements
688 self.svfs.options = dict((r, 1) for r in self.requirements
657 if r in self.openerreqs)
689 if r in self.openerreqs)
658 # experimental config: format.chunkcachesize
690 # experimental config: format.chunkcachesize
659 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
691 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
660 if chunkcachesize is not None:
692 if chunkcachesize is not None:
661 self.svfs.options['chunkcachesize'] = chunkcachesize
693 self.svfs.options['chunkcachesize'] = chunkcachesize
662 # experimental config: format.manifestcachesize
694 # experimental config: format.manifestcachesize
663 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
695 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
664 if manifestcachesize is not None:
696 if manifestcachesize is not None:
665 self.svfs.options['manifestcachesize'] = manifestcachesize
697 self.svfs.options['manifestcachesize'] = manifestcachesize
666 deltabothparents = self.ui.configbool('storage',
698 deltabothparents = self.ui.configbool('storage',
667 'revlog.optimize-delta-parent-choice')
699 'revlog.optimize-delta-parent-choice')
668 self.svfs.options['deltabothparents'] = deltabothparents
700 self.svfs.options['deltabothparents'] = deltabothparents
669 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
701 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
670 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
702 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
671 if 0 <= chainspan:
703 if 0 <= chainspan:
672 self.svfs.options['maxdeltachainspan'] = chainspan
704 self.svfs.options['maxdeltachainspan'] = chainspan
673 mmapindexthreshold = self.ui.configbytes('experimental',
705 mmapindexthreshold = self.ui.configbytes('experimental',
674 'mmapindexthreshold')
706 'mmapindexthreshold')
675 if mmapindexthreshold is not None:
707 if mmapindexthreshold is not None:
676 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
708 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
677 withsparseread = self.ui.configbool('experimental', 'sparse-read')
709 withsparseread = self.ui.configbool('experimental', 'sparse-read')
678 srdensitythres = float(self.ui.config('experimental',
710 srdensitythres = float(self.ui.config('experimental',
679 'sparse-read.density-threshold'))
711 'sparse-read.density-threshold'))
680 srmingapsize = self.ui.configbytes('experimental',
712 srmingapsize = self.ui.configbytes('experimental',
681 'sparse-read.min-gap-size')
713 'sparse-read.min-gap-size')
682 self.svfs.options['with-sparse-read'] = withsparseread
714 self.svfs.options['with-sparse-read'] = withsparseread
683 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
715 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
684 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
716 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
685 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
717 sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
686 self.svfs.options['sparse-revlog'] = sparserevlog
718 self.svfs.options['sparse-revlog'] = sparserevlog
687 if sparserevlog:
719 if sparserevlog:
688 self.svfs.options['generaldelta'] = True
720 self.svfs.options['generaldelta'] = True
689 maxchainlen = None
721 maxchainlen = None
690 if sparserevlog:
722 if sparserevlog:
691 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
723 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
692 # experimental config: format.maxchainlen
724 # experimental config: format.maxchainlen
693 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
725 maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
694 if maxchainlen is not None:
726 if maxchainlen is not None:
695 self.svfs.options['maxchainlen'] = maxchainlen
727 self.svfs.options['maxchainlen'] = maxchainlen
696
728
697 for r in self.requirements:
729 for r in self.requirements:
698 if r.startswith('exp-compression-'):
730 if r.startswith('exp-compression-'):
699 self.svfs.options['compengine'] = r[len('exp-compression-'):]
731 self.svfs.options['compengine'] = r[len('exp-compression-'):]
700
732
701 # TODO move "revlogv2" to openerreqs once finalized.
733 # TODO move "revlogv2" to openerreqs once finalized.
702 if REVLOGV2_REQUIREMENT in self.requirements:
734 if REVLOGV2_REQUIREMENT in self.requirements:
703 self.svfs.options['revlogv2'] = True
735 self.svfs.options['revlogv2'] = True
704
736
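# Hedged illustration: the mapping built above is consumed by revlogs opened
# through the store vfs. A quick way to inspect what a given repository ended
# up with (helper name hypothetical):
def _dumpopeneropts(repo):
    for key, value in sorted(repo.svfs.options.items()):
        repo.ui.write('%s: %s\n' % (key, pycompat.bytestr(value)))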
705 def _writerequirements(self):
737 def _writerequirements(self):
706 scmutil.writerequires(self.vfs, self.requirements)
738 scmutil.writerequires(self.vfs, self.requirements)
707
739
708 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
740 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
709 # self -> auditor -> self._checknested -> self
741 # self -> auditor -> self._checknested -> self
710
742
711 @property
743 @property
712 def auditor(self):
744 def auditor(self):
713 # This is only used by context.workingctx.match in order to
745 # This is only used by context.workingctx.match in order to
714 # detect files in subrepos.
746 # detect files in subrepos.
715 return pathutil.pathauditor(self.root, callback=self._checknested)
747 return pathutil.pathauditor(self.root, callback=self._checknested)
716
748
717 @property
749 @property
718 def nofsauditor(self):
750 def nofsauditor(self):
719 # This is only used by context.basectx.match in order to detect
751 # This is only used by context.basectx.match in order to detect
720 # files in subrepos.
752 # files in subrepos.
721 return pathutil.pathauditor(self.root, callback=self._checknested,
753 return pathutil.pathauditor(self.root, callback=self._checknested,
722 realfs=False, cached=True)
754 realfs=False, cached=True)
723
755
724 def _checknested(self, path):
756 def _checknested(self, path):
725 """Determine if path is a legal nested repository."""
757 """Determine if path is a legal nested repository."""
726 if not path.startswith(self.root):
758 if not path.startswith(self.root):
727 return False
759 return False
728 subpath = path[len(self.root) + 1:]
760 subpath = path[len(self.root) + 1:]
729 normsubpath = util.pconvert(subpath)
761 normsubpath = util.pconvert(subpath)
730
762
731 # XXX: Checking against the current working copy is wrong in
763 # XXX: Checking against the current working copy is wrong in
732 # the sense that it can reject things like
764 # the sense that it can reject things like
733 #
765 #
734 # $ hg cat -r 10 sub/x.txt
766 # $ hg cat -r 10 sub/x.txt
735 #
767 #
736 # if sub/ is no longer a subrepository in the working copy
768 # if sub/ is no longer a subrepository in the working copy
737 # parent revision.
769 # parent revision.
738 #
770 #
739 # However, it can of course also allow things that would have
771 # However, it can of course also allow things that would have
740 # been rejected before, such as the above cat command if sub/
772 # been rejected before, such as the above cat command if sub/
741 # is a subrepository now, but was a normal directory before.
773 # is a subrepository now, but was a normal directory before.
742 # The old path auditor would have rejected by mistake since it
774 # The old path auditor would have rejected by mistake since it
743 # panics when it sees sub/.hg/.
775 # panics when it sees sub/.hg/.
744 #
776 #
745 # All in all, checking against the working copy seems sensible
777 # All in all, checking against the working copy seems sensible
746 # since we want to prevent access to nested repositories on
778 # since we want to prevent access to nested repositories on
747 # the filesystem *now*.
779 # the filesystem *now*.
748 ctx = self[None]
780 ctx = self[None]
749 parts = util.splitpath(subpath)
781 parts = util.splitpath(subpath)
750 while parts:
782 while parts:
751 prefix = '/'.join(parts)
783 prefix = '/'.join(parts)
752 if prefix in ctx.substate:
784 if prefix in ctx.substate:
753 if prefix == normsubpath:
785 if prefix == normsubpath:
754 return True
786 return True
755 else:
787 else:
756 sub = ctx.sub(prefix)
788 sub = ctx.sub(prefix)
757 return sub.checknested(subpath[len(prefix) + 1:])
789 return sub.checknested(subpath[len(prefix) + 1:])
758 else:
790 else:
759 parts.pop()
791 parts.pop()
760 return False
792 return False
761
793
762 def peer(self):
794 def peer(self):
763 return localpeer(self) # not cached to avoid reference cycle
795 return localpeer(self) # not cached to avoid reference cycle
764
796
765 def unfiltered(self):
797 def unfiltered(self):
766 """Return unfiltered version of the repository
798 """Return unfiltered version of the repository
767
799
768 Intended to be overwritten by filtered repo."""
800 Intended to be overwritten by filtered repo."""
769 return self
801 return self
770
802
771 def filtered(self, name, visibilityexceptions=None):
803 def filtered(self, name, visibilityexceptions=None):
772 """Return a filtered version of a repository"""
804 """Return a filtered version of a repository"""
773 cls = repoview.newtype(self.unfiltered().__class__)
805 cls = repoview.newtype(self.unfiltered().__class__)
774 return cls(self, name, visibilityexceptions)
806 return cls(self, name, visibilityexceptions)
775
807
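# Hedged usage sketch: repoview filter names such as 'served' (used by
# localpeer above) or 'visible' control which revisions are exposed, while
# unfiltered() always returns the underlying repository.
def _servedview(repo):
    served = repo.filtered('served')
    assert served.unfiltered() is repo.unfiltered()
    return served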
776 @repofilecache('bookmarks', 'bookmarks.current')
808 @repofilecache('bookmarks', 'bookmarks.current')
777 def _bookmarks(self):
809 def _bookmarks(self):
778 return bookmarks.bmstore(self)
810 return bookmarks.bmstore(self)
779
811
780 @property
812 @property
781 def _activebookmark(self):
813 def _activebookmark(self):
782 return self._bookmarks.active
814 return self._bookmarks.active
783
815
784 # _phasesets depend on changelog. what we need is to call
816 # _phasesets depend on changelog. what we need is to call
785 # _phasecache.invalidate() if '00changelog.i' was changed, but it
817 # _phasecache.invalidate() if '00changelog.i' was changed, but it
786 # can't be easily expressed in the filecache mechanism.
818 # can't be easily expressed in the filecache mechanism.
787 @storecache('phaseroots', '00changelog.i')
819 @storecache('phaseroots', '00changelog.i')
788 def _phasecache(self):
820 def _phasecache(self):
789 return phases.phasecache(self, self._phasedefaults)
821 return phases.phasecache(self, self._phasedefaults)
790
822
791 @storecache('obsstore')
823 @storecache('obsstore')
792 def obsstore(self):
824 def obsstore(self):
793 return obsolete.makestore(self.ui, self)
825 return obsolete.makestore(self.ui, self)
794
826
795 @storecache('00changelog.i')
827 @storecache('00changelog.i')
796 def changelog(self):
828 def changelog(self):
797 return changelog.changelog(self.svfs,
829 return changelog.changelog(self.svfs,
798 trypending=txnutil.mayhavepending(self.root))
830 trypending=txnutil.mayhavepending(self.root))
799
831
800 def _constructmanifest(self):
832 def _constructmanifest(self):
801 # This is a temporary function while we migrate from manifest to
833 # This is a temporary function while we migrate from manifest to
802 # manifestlog. It allows bundlerepo and unionrepo to intercept the
834 # manifestlog. It allows bundlerepo and unionrepo to intercept the
803 # manifest creation.
835 # manifest creation.
804 return manifest.manifestrevlog(self.svfs)
836 return manifest.manifestrevlog(self.svfs)
805
837
806 @storecache('00manifest.i')
838 @storecache('00manifest.i')
807 def manifestlog(self):
839 def manifestlog(self):
808 return manifest.manifestlog(self.svfs, self)
840 return manifest.manifestlog(self.svfs, self)
809
841
810 @repofilecache('dirstate')
842 @repofilecache('dirstate')
811 def dirstate(self):
843 def dirstate(self):
812 return self._makedirstate()
844 return self._makedirstate()
813
845
814 def _makedirstate(self):
846 def _makedirstate(self):
815 """Extension point for wrapping the dirstate per-repo."""
847 """Extension point for wrapping the dirstate per-repo."""
816 sparsematchfn = lambda: sparse.matcher(self)
848 sparsematchfn = lambda: sparse.matcher(self)
817
849
818 return dirstate.dirstate(self.vfs, self.ui, self.root,
850 return dirstate.dirstate(self.vfs, self.ui, self.root,
819 self._dirstatevalidate, sparsematchfn)
851 self._dirstatevalidate, sparsematchfn)
820
852
821 def _dirstatevalidate(self, node):
853 def _dirstatevalidate(self, node):
822 try:
854 try:
823 self.changelog.rev(node)
855 self.changelog.rev(node)
824 return node
856 return node
825 except error.LookupError:
857 except error.LookupError:
826 if not self._dirstatevalidatewarned:
858 if not self._dirstatevalidatewarned:
827 self._dirstatevalidatewarned = True
859 self._dirstatevalidatewarned = True
828 self.ui.warn(_("warning: ignoring unknown"
860 self.ui.warn(_("warning: ignoring unknown"
829 " working parent %s!\n") % short(node))
861 " working parent %s!\n") % short(node))
830 return nullid
862 return nullid
831
863
832 @storecache(narrowspec.FILENAME)
864 @storecache(narrowspec.FILENAME)
833 def narrowpats(self):
865 def narrowpats(self):
834 """matcher patterns for this repository's narrowspec
866 """matcher patterns for this repository's narrowspec
835
867
836 A tuple of (includes, excludes).
868 A tuple of (includes, excludes).
837 """
869 """
838 source = self
870 source = self
839 if self.shared():
871 if self.shared():
840 from . import hg
872 from . import hg
841 source = hg.sharedreposource(self)
873 source = hg.sharedreposource(self)
842 return narrowspec.load(source)
874 return narrowspec.load(source)
843
875
844 @storecache(narrowspec.FILENAME)
876 @storecache(narrowspec.FILENAME)
845 def _narrowmatch(self):
877 def _narrowmatch(self):
846 if repository.NARROW_REQUIREMENT not in self.requirements:
878 if repository.NARROW_REQUIREMENT not in self.requirements:
847 return matchmod.always(self.root, '')
879 return matchmod.always(self.root, '')
848 include, exclude = self.narrowpats
880 include, exclude = self.narrowpats
849 return narrowspec.match(self.root, include=include, exclude=exclude)
881 return narrowspec.match(self.root, include=include, exclude=exclude)
850
882
851 # TODO(martinvonz): make this property-like instead?
883 # TODO(martinvonz): make this property-like instead?
852 def narrowmatch(self):
884 def narrowmatch(self):
853 return self._narrowmatch
885 return self._narrowmatch
854
886
855 def setnarrowpats(self, newincludes, newexcludes):
887 def setnarrowpats(self, newincludes, newexcludes):
856 narrowspec.save(self, newincludes, newexcludes)
888 narrowspec.save(self, newincludes, newexcludes)
857 self.invalidate(clearfilecache=True)
889 self.invalidate(clearfilecache=True)
858
890
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

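    # Illustrative usage sketch (not part of this module), assuming ``repo``
    # is a localrepository and ``baserev`` an integer revision::
    #
    #   headrevs = repo.revs('heads(%d::)', baserev)      # %d escapes an int
    #   draftrevs = repo.revs('%ld and draft()', list(headrevs))  # %ld: list
    #
    # The returned smartset is lazy; iterate it or wrap it in list() as
    # needed.
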
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

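    # Illustrative usage sketch (not part of this module); the alias name and
    # definition below are hypothetical::
    #
    #   revs = repo.anyrevs(['stableheads and draft()'], user=True,
    #                       localalias={'stableheads':
    #                                   'heads(branch(stable))'})
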
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

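    # Illustrative usage sketch (not part of this module) tying the tag
    # helpers above together, assuming ``repo`` and a binary ``node``::
    #
    #   for name, n in sorted(repo.tags().iteritems()):
    #       kind = repo.tagtype(name) or 'unknown'    # 'global' or 'local'
    #       repo.ui.write('%s (%s) -> %s\n' % (name, kind, hex(n)))
    #   names = repo.nodetags(node)                   # tags pointing at node
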
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

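    # Illustrative sketch (not part of this module): the filter patterns
    # loaded above come from the ``[encode]`` and ``[decode]`` hgrc sections,
    # roughly of the form::
    #
    #   [encode]
    #   **.txt = some-command      # file data is piped through the command
    #
    # An extension can also register an in-process data filter and reference
    # it by name in those sections; the names below are hypothetical::
    #
    #   def upperfilter(s, params, **kwargs):
    #       return s.upper()
    #
    #   def reposetup(ui, repo):
    #       repo.adddatafilter('upper:', upperfilter)
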
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
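        #
        # An illustrative, hypothetical in-process hook consuming that file
        # (every name below is made up)::
        #
        #   def tagmoved(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved') != '1':
        #           return
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.split(' ', 2)
        #               ui.status('tag %s: %s\n' % (action, name.strip()))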
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

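    # Illustrative usage sketch (not part of this module): callers are
    # expected to hold the store lock before opening a transaction, roughly::
    #
    #   with repo.wlock(), repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # write to the store; closed on success, aborted on error
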
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

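    # Illustrative sketch (not part of this module): an extension that wants
    # its own caches warmed alongside the ones above could wrap this method,
    # assuming extensions.wrapfunction(); the cache object is hypothetical::
    #
    #   def wrappedupdatecaches(orig, repo, tr=None, full=False):
    #       orig(repo, tr=tr, full=full)
    #       myextensioncache.warm(repo)
    #
    #   extensions.wrapfunction(localrepo.localrepository, 'updatecaches',
    #                           wrappedupdatecaches)
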
1637 def invalidatecaches(self):
1669 def invalidatecaches(self):
1638
1670
1639 if '_tagscache' in vars(self):
1671 if '_tagscache' in vars(self):
1640 # can't use delattr on proxy
1672 # can't use delattr on proxy
1641 del self.__dict__['_tagscache']
1673 del self.__dict__['_tagscache']
1642
1674
1643 self.unfiltered()._branchcaches.clear()
1675 self.unfiltered()._branchcaches.clear()
1644 self.invalidatevolatilesets()
1676 self.invalidatevolatilesets()
1645 self._sparsesignaturecache.clear()
1677 self._sparsesignaturecache.clear()
1646
1678
1647 def invalidatevolatilesets(self):
1679 def invalidatevolatilesets(self):
1648 self.filteredrevcache.clear()
1680 self.filteredrevcache.clear()
1649 obsolete.clearobscaches(self)
1681 obsolete.clearobscaches(self)
1650
1682
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes an unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

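    # An illustrative sketch of registering an _afterlock() callback, assuming
    # a 'repo' instance; the callback runs once every lock has been released
    # (or immediately if no lock is currently held):
    #
    #     def notify():
    #         repo.ui.status('all repository locks released\n')
    #     repo._afterlock(notify)
    #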
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

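    # A minimal sketch of the lock-ordering contract documented above,
    # assuming a 'repo' localrepository instance; both locks act as context
    # managers, and 'wlock' is taken before 'lock':
    #
    #     with repo.wlock(), repo.lock():
    #         with repo.transaction('example') as tr:
    #             ...  # mutate the store while holding both locks
    #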
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

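    # A minimal sketch of driving commit() directly, assuming a 'repo'
    # instance with pending working-directory changes (the command layer
    # normally reaches this through cmdutil instead):
    #
    #     node = repo.commit(text='example commit',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
    #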
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

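    # commitctx() also accepts in-memory contexts. A hedged sketch using
    # context.memctx / context.memfilectx (argument order may differ between
    # Mercurial versions; the names below are illustrative):
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, memctx, path, 'new contents\n')
    #     mctx = context.memctx(repo, (p1node, p2node), 'example message',
    #                           ['a.txt'], getfilectx, user='alice')
    #     node = repo.commitctx(mctx)
    #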
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

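    # A small sketch of registering a post-dirstate-status callback, assuming
    # a 'repo' instance (typically done from an extension); the callback
    # receives the working context and the status object as documented above:
    #
    #     def fixup(wctx, status):
    #         wctx.repo().ui.debug('%d files modified after status\n'
    #                              % len(status.modified))
    #     repo.addpostdsstatus(fixup)
    #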
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, and
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

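    # An illustrative sketch of the pushkey/listkeys pair for the 'bookmarks'
    # namespace, assuming a 'repo' instance and a binary 'newnode' (values are
    # hex strings; the namespaces available depend on registered pushkey
    # handlers):
    #
    #     marks = repo.listkeys('bookmarks')
    #     repo.pushkey('bookmarks', 'feature', marks.get('feature', ''),
    #                  hex(newnode))
    #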
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

2408 def newreporequirements(ui, createopts=None):
2440 def newreporequirements(ui, createopts=None):
2409 """Determine the set of requirements for a new local repository.
2441 """Determine the set of requirements for a new local repository.
2410
2442
2411 Extensions can wrap this function to specify custom requirements for
2443 Extensions can wrap this function to specify custom requirements for
2412 new repositories.
2444 new repositories.
2413 """
2445 """
2414 createopts = createopts or {}
2446 createopts = createopts or {}
2415
2447
2416 requirements = {'revlogv1'}
2448 requirements = {'revlogv1'}
2417 if ui.configbool('format', 'usestore'):
2449 if ui.configbool('format', 'usestore'):
2418 requirements.add('store')
2450 requirements.add('store')
2419 if ui.configbool('format', 'usefncache'):
2451 if ui.configbool('format', 'usefncache'):
2420 requirements.add('fncache')
2452 requirements.add('fncache')
2421 if ui.configbool('format', 'dotencode'):
2453 if ui.configbool('format', 'dotencode'):
2422 requirements.add('dotencode')
2454 requirements.add('dotencode')
2423
2455
2424 compengine = ui.config('experimental', 'format.compression')
2456 compengine = ui.config('experimental', 'format.compression')
2425 if compengine not in util.compengines:
2457 if compengine not in util.compengines:
2426 raise error.Abort(_('compression engine %s defined by '
2458 raise error.Abort(_('compression engine %s defined by '
2427 'experimental.format.compression not available') %
2459 'experimental.format.compression not available') %
2428 compengine,
2460 compengine,
2429 hint=_('run "hg debuginstall" to list available '
2461 hint=_('run "hg debuginstall" to list available '
2430 'compression engines'))
2462 'compression engines'))
2431
2463
2432 # zlib is the historical default and doesn't need an explicit requirement.
2464 # zlib is the historical default and doesn't need an explicit requirement.
2433 if compengine != 'zlib':
2465 if compengine != 'zlib':
2434 requirements.add('exp-compression-%s' % compengine)
2466 requirements.add('exp-compression-%s' % compengine)
2435
2467
2436 if scmutil.gdinitconfig(ui):
2468 if scmutil.gdinitconfig(ui):
2437 requirements.add('generaldelta')
2469 requirements.add('generaldelta')
2438 if ui.configbool('experimental', 'treemanifest'):
2470 if ui.configbool('experimental', 'treemanifest'):
2439 requirements.add('treemanifest')
2471 requirements.add('treemanifest')
2440 # experimental config: format.sparse-revlog
2472 # experimental config: format.sparse-revlog
2441 if ui.configbool('format', 'sparse-revlog'):
2473 if ui.configbool('format', 'sparse-revlog'):
2442 requirements.add(SPARSEREVLOG_REQUIREMENT)
2474 requirements.add(SPARSEREVLOG_REQUIREMENT)
2443
2475
2444 revlogv2 = ui.config('experimental', 'revlogv2')
2476 revlogv2 = ui.config('experimental', 'revlogv2')
2445 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2477 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2446 requirements.remove('revlogv1')
2478 requirements.remove('revlogv1')
2447 # generaldelta is implied by revlogv2.
2479 # generaldelta is implied by revlogv2.
2448 requirements.discard('generaldelta')
2480 requirements.discard('generaldelta')
2449 requirements.add(REVLOGV2_REQUIREMENT)
2481 requirements.add(REVLOGV2_REQUIREMENT)
2450 # experimental config: format.internal-phase
2482 # experimental config: format.internal-phase
2451 if ui.configbool('format', 'internal-phase'):
2483 if ui.configbool('format', 'internal-phase'):
2452 requirements.add('internal-phase')
2484 requirements.add('internal-phase')
2453
2485
2454 if createopts.get('narrowfiles'):
2486 if createopts.get('narrowfiles'):
2455 requirements.add(repository.NARROW_REQUIREMENT)
2487 requirements.add(repository.NARROW_REQUIREMENT)
2456
2488
2457 return requirements
2489 return requirements
2458
2490
2459 def filterknowncreateopts(ui, createopts):
2491 def filterknowncreateopts(ui, createopts):
2460 """Filters a dict of repo creation options against options that are known.
2492 """Filters a dict of repo creation options against options that are known.
2461
2493
2462 Receives a dict of repo creation options and returns a dict of those
2494 Receives a dict of repo creation options and returns a dict of those
2463 options that we don't know how to handle.
2495 options that we don't know how to handle.
2464
2496
2465 This function is called as part of repository creation. If the
2497 This function is called as part of repository creation. If the
2466 returned dict contains any items, repository creation will not
2498 returned dict contains any items, repository creation will not
2467 be allowed, as it means there was a request to create a repository
2499 be allowed, as it means there was a request to create a repository
2468 with options not recognized by loaded code.
2500 with options not recognized by loaded code.
2469
2501
2470 Extensions can wrap this function to filter out creation options
2502 Extensions can wrap this function to filter out creation options
2471 they know how to handle.
2503 they know how to handle.
2472 """
2504 """
2473 known = {'narrowfiles'}
2505 known = {'narrowfiles'}
2474
2506
2475 return {k: v for k, v in createopts.items() if k not in known}
2507 return {k: v for k, v in createopts.items() if k not in known}
2476
2508
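As the docstring notes, an extension that introduces its own creation option would wrap this function and strip the options it handles. A minimal sketch, assuming a hypothetical option name 'myopt' handled by the extension:

    from mercurial import extensions, localrepo

    def _filtercreateopts(orig, ui, createopts):
        unknown = orig(ui, createopts)
        # drop the hypothetical option this extension knows how to handle
        unknown.pop('myopt', None)
        return unknown

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                                _filtercreateopts)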
2477 def createrepository(ui, path, createopts=None):
2509 def createrepository(ui, path, createopts=None):
2478 """Create a new repository in a vfs.
2510 """Create a new repository in a vfs.
2479
2511
2480 ``path`` path to the new repo's working directory.
2512 ``path`` path to the new repo's working directory.
2481 ``createopts`` options for the new repository.
2513 ``createopts`` options for the new repository.
2482 """
2514 """
2483 createopts = createopts or {}
2515 createopts = createopts or {}
2484
2516
2485 unknownopts = filterknowncreateopts(ui, createopts)
2517 unknownopts = filterknowncreateopts(ui, createopts)
2486
2518
2487 if not isinstance(unknownopts, dict):
2519 if not isinstance(unknownopts, dict):
2488 raise error.ProgrammingError('filterknowncreateopts() did not return '
2520 raise error.ProgrammingError('filterknowncreateopts() did not return '
2489 'a dict')
2521 'a dict')
2490
2522
2491 if unknownopts:
2523 if unknownopts:
2492 raise error.Abort(_('unable to create repository because of unknown '
2524 raise error.Abort(_('unable to create repository because of unknown '
2493 'creation option: %s') %
2525 'creation option: %s') %
2494                           ', '.join(sorted(unknownopts)),
2526                           ', '.join(sorted(unknownopts)),
2495 hint=_('is a required extension not loaded?'))
2527 hint=_('is a required extension not loaded?'))
2496
2528
2497 requirements = newreporequirements(ui, createopts=createopts)
2529 requirements = newreporequirements(ui, createopts=createopts)
2498
2530
2499 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2531 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2500 if not wdirvfs.exists():
2532 if not wdirvfs.exists():
2501 wdirvfs.makedirs()
2533 wdirvfs.makedirs()
2502
2534
2503 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2535 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2504 if hgvfs.exists():
2536 if hgvfs.exists():
2505 raise error.RepoError(_('repository %s already exists') % path)
2537 raise error.RepoError(_('repository %s already exists') % path)
2506
2538
2507 hgvfs.makedir(notindexed=True)
2539 hgvfs.makedir(notindexed=True)
2508
2540
2509 if b'store' in requirements:
2541 if b'store' in requirements:
2510 hgvfs.mkdir(b'store')
2542 hgvfs.mkdir(b'store')
2511
2543
2512 # We create an invalid changelog outside the store so very old
2544 # We create an invalid changelog outside the store so very old
2513 # Mercurial versions (which didn't know about the requirements
2545 # Mercurial versions (which didn't know about the requirements
2514 # file) encounter an error on reading the changelog. This
2546 # file) encounter an error on reading the changelog. This
2515 # effectively locks out old clients and prevents them from
2547 # effectively locks out old clients and prevents them from
2516 # mucking with a repo in an unknown format.
2548 # mucking with a repo in an unknown format.
2517 #
2549 #
2518 # The revlog header has version 2, which won't be recognized by
2550 # The revlog header has version 2, which won't be recognized by
2519 # such old clients.
2551 # such old clients.
2520 hgvfs.append(b'00changelog.i',
2552 hgvfs.append(b'00changelog.i',
2521 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2553 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2522 b'layout')
2554 b'layout')
2523
2555
2524 scmutil.writerequires(hgvfs, requirements)
2556 scmutil.writerequires(hgvfs, requirements)
2525
2557
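A minimal usage sketch of the function above (the path and options are hypothetical; the path is passed as bytes to match the vfs layer):

    from mercurial import ui as uimod, localrepo

    myui = uimod.ui.load()
    localrepo.createrepository(myui, b'/tmp/newrepo',
                               createopts={'narrowfiles': False})
    # /tmp/newrepo/.hg now holds the 'requires' file, the dummy
    # 00changelog.i, and a store/ directory when 'store' is required.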
2526 def poisonrepository(repo):
2558 def poisonrepository(repo):
2527 """Poison a repository instance so it can no longer be used."""
2559 """Poison a repository instance so it can no longer be used."""
2528 # Perform any cleanup on the instance.
2560 # Perform any cleanup on the instance.
2529 repo.close()
2561 repo.close()
2530
2562
2531 # Our strategy is to replace the type of the object with one that
2563 # Our strategy is to replace the type of the object with one that
2532 # has all attribute lookups result in error.
2564 # has all attribute lookups result in error.
2533 #
2565 #
2534 # But we have to allow the close() method because some constructors
2566 # But we have to allow the close() method because some constructors
2535 # of repos call close() on repo references.
2567 # of repos call close() on repo references.
2536 class poisonedrepository(object):
2568 class poisonedrepository(object):
2537 def __getattribute__(self, item):
2569 def __getattribute__(self, item):
2538 if item == r'close':
2570 if item == r'close':
2539 return object.__getattribute__(self, item)
2571 return object.__getattribute__(self, item)
2540
2572
2541 raise error.ProgrammingError('repo instances should not be used '
2573 raise error.ProgrammingError('repo instances should not be used '
2542 'after unshare')
2574 'after unshare')
2543
2575
2544 def close(self):
2576 def close(self):
2545 pass
2577 pass
2546
2578
2547 # We may have a repoview, which intercepts __setattr__. So be sure
2579 # We may have a repoview, which intercepts __setattr__. So be sure
2548 # we operate at the lowest level possible.
2580 # we operate at the lowest level possible.
2549 object.__setattr__(repo, r'__class__', poisonedrepository)
2581 object.__setattr__(repo, r'__class__', poisonedrepository)
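The effect of the class swap above, sketched under the assumption that 'repo' is an existing repository instance obtained elsewhere:

    from mercurial import error, localrepo

    localrepo.poisonrepository(repo)
    repo.close()               # still permitted; close() is a no-op now
    try:
        repo.changelog         # any other attribute lookup fails loudly
    except error.ProgrammingError:
        pass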