localrepo: do not cache auditor/nofsauditor which would make reference cycle

Yuya Nishihara
r39348:9198e41d default
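
The diff below drops two eagerly-created, cached instance attributes
(auditor, nofsauditor) in favor of properties that rebuild the path auditor
on each access. Caching them pins the repository in a reference cycle
(self -> auditor -> self._checknested -> self), so the repo object can only
be reclaimed by Python's cycle collector rather than by plain reference
counting. A minimal standalone sketch of the effect — the Repo/Auditor names
and the collected_without_gc helper are hypothetical, not Mercurial's API:

import gc
import weakref

class Auditor(object):
    def __init__(self, callback):
        self.callback = callback  # a bound method keeps its owner alive

class CachedRepo(object):
    def __init__(self):
        # cycle: self -> self.auditor -> self._checknested.__self__ -> self
        self.auditor = Auditor(self._checknested)

    def _checknested(self, path):
        return False

class PropertyRepo(object):
    @property
    def auditor(self):
        # rebuilt on demand; no back-reference is ever stored on self
        return Auditor(self._checknested)

    def _checknested(self, path):
        return False

def collected_without_gc(cls):
    obj = cls()
    obj.auditor  # touch it: the cached variant stores the cycle
    ref = weakref.ref(obj)
    del obj      # plain refcounting should reclaim acyclic objects
    return ref() is None

gc.disable()
try:
    print(collected_without_gc(CachedRepo))    # False: needs gc.collect()
    print(collected_without_gc(PropertyRepo))  # True: freed immediately
finally:
    gc.enable()
    gc.collect()
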
@@ -1,2435 +1,2443 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import hashlib
 import os
 import random
 import sys
 import time
 import weakref
 
 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     discovery,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     narrowspec,
     obsolete,
     pathutil,
     phases,
     pushkey,
     pycompat,
     repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store,
     subrepoutil,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )
 from .utils import (
     interfaceutil,
     procutil,
     stringutil,
 )
 
 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq
 
 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()
 
 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo are done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(_basefilecache, self).__delete__(repo.unfiltered())
 
 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))
 
     def join(self, obj, fname):
         return obj.vfs.join(fname)
 
 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))
 
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property
 
     This returns (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True
 
 class unfilteredpropertycache(util.propertycache):
     """propertycache that apply to unfiltered repo only"""
 
     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)
 
 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering in account"""
 
     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)
 
 
 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())
 
 def unfilteredmethod(orig):
     """decorate method that always need to be run on unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper
 
 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})
 
 @interfaceutil.implementer(repository.ipeercommandexecutor)
 class localcommandexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
 
     def __enter__(self):
         return self
 
     def __exit__(self, exctype, excvalue, exctb):
         self.close()
 
     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'sendcommands()')
 
         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used after '
                                          'close()')
 
         # We don't need to support anything fancy. Just call the named
         # method on the peer and return a resolved future.
         fn = getattr(self._peer, pycompat.sysstr(command))
 
         f = pycompat.futures.Future()
 
         try:
             result = fn(**pycompat.strkwargs(args))
         except Exception:
             pycompat.future_set_exception_info(f, sys.exc_info()[1:])
         else:
             f.set_result(result)
 
         return f
 
     def sendcommands(self):
         self._sent = True
 
     def close(self):
         self._closed = True
 
 @interfaceutil.implementer(repository.ipeercommands)
 class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=None):
         super(localpeer, self).__init__()
 
         if caps is None:
             caps = moderncaps.copy()
         self._repo = repo.filtered('served')
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
 
     # Begin of _basepeer interface.
 
     def url(self):
         return self._repo.url()
 
     def local(self):
         return self._repo
 
     def peer(self):
         return self
 
     def canpush(self):
         return True
 
     def close(self):
         self._repo.close()
 
     # End of _basepeer interface.
 
     # Begin of _basewirecommands interface.
 
     def branchmap(self):
         return self._repo.branchmap()
 
     def capabilities(self):
         return self._caps
 
     def clonebundles(self):
         return self._repo.tryread('clonebundles.manifest')
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         """Used to test argument passing over the wire"""
         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                    pycompat.bytestr(four),
                                    pycompat.bytestr(five))
 
     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)[1]
         cb = util.chunkbuffer(chunks)
 
         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)
 
     def heads(self):
         return self._repo.heads()
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def stream_out(self):
         raise error.Abort(_('cannot perform stream clone against local '
                             'peer'))
 
     def unbundle(self, bundle, heads, url):
         """apply a bundle on a repo
 
         This function handles the repo locking itself."""
         try:
             try:
                 bundle = exchange.readbundle(self.ui, bundle, None)
                 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'),
                                       stringutil.forcebytestr(exc))
 
     # End of _basewirecommands interface.
 
     # Begin of peer interface.
 
     def commandexecutor(self):
         return localcommandexecutor(self)
 
     # End of peer interface.
 
 @interfaceutil.implementer(repository.ipeerlegacycommands)
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         super(locallegacypeer, self).__init__(repo, caps=legacycaps)
 
     # Begin of baselegacywirecommands interface.
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def changegroup(self, nodes, source):
         outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                       missingheads=self._repo.heads())
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     def changegroupsubset(self, bases, heads, source):
         outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                       missingheads=heads)
         return changegroup.makechangegroup(self._repo, outgoing, '01', source)
 
     # End of baselegacywirecommands interface.
 
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
 
 # A repository with the sparserevlog feature will have delta chains that
 # can spread over a larger span. Sparse reading cuts these large spans into
 # pieces, so that each piece isn't too big.
 # Without the sparserevlog capability, reading from the repository could use
 # huge amounts of memory, because the whole span would be read at once,
 # including all the intermediate revisions that aren't pertinent for the chain.
 # This is why once a repository has enabled sparse-read, it becomes required.
 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
 
 # Functions receiving (ui, features) that extensions can register to impact
 # the ability to load repositories with custom requirements. Only
 # functions defined in loaded extensions are called.
 #
 # The function receives a set of requirement strings that the repository
 # is capable of opening. Functions will typically add elements to the
 # set to reflect that the extension knows how to handle that requirements.
 featuresetupfuncs = set()
 
 @interfaceutil.implementer(repository.completelocalrepository)
 class localrepository(object):
 
     # obsolete experimental requirements:
     # - manifestv2: An experimental new manifest format that allowed
     #   for stem compression of long paths. Experiment ended up not
     #   being successful (repository sizes went up due to worse delta
     #   chains), and the code was deleted in 4.6.
     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         REVLOGV2_REQUIREMENT,
         SPARSEREVLOG_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
         'internal-phase'
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
     }
 
     # list of prefix for file which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We migh consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assume
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicatged business someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't be dirstate covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remainig bit and drop this line
         'bisect.state',
     }
 
     def __init__(self, baseui, path, create=False, intents=None):
         self.requirements = set()
         self.filtername = None
         # wvfs: rooted at the repository root, used to access the working copy
         self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
         # vfs: rooted at .hg, used to access repo files outside of .hg/store
         self.vfs = None
         # svfs: usually rooted at .hg/store, used to access repository history
         # If this is a shared repository, this vfs may point to another
         # repository's .hg/store directory.
         self.svfs = None
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
-        # This is only used by context.workingctx.match in order to
-        # detect files in subrepos.
-        self.auditor = pathutil.pathauditor(
-            self.root, callback=self._checknested)
-        # This is only used by context.basectx.match in order to detect
-        # files in subrepos.
-        self.nofsauditor = pathutil.pathauditor(
-            self.root, callback=self._checknested, realfs=False, cached=True)
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.vfs.join("hgrc"), self.root)
             self._loadextensions()
         except IOError:
             pass
 
         if featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
         color.setup(self.ui)
 
         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)
 
         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)
 
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
 
                 if 'store' in self.requirements:
                     self.vfs.mkdir("store")
 
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
             else:
                 try:
                     self.vfs.stat()
                 except OSError as inst:
                     if inst.errno != errno.ENOENT:
                         raise
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                     self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
         cachepath = self.vfs.join('cache')
         self.sharedpath = self.path
         try:
             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
             if 'relshared' in self.requirements:
                 sharedpath = self.vfs.join(sharedpath)
             vfs = vfsmod.vfs(sharedpath, realpath=True)
             cachepath = vfs.join('cache')
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         if 'exp-sparse' in self.requirements and not sparse.enabled:
             raise error.RepoError(_('repository is using sparse feature but '
                                     'sparse is not enabled; enable the '
                                     '"sparse" extensions to access'))
 
         self.store = store.store(
             self.requirements, self.sharedpath,
             lambda base: vfsmod.vfs(base, cacheaudited=True))
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
         self.cachevfs.createmode = self.store.createmode
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
             else: # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
         self._applyopenerreqs()
         if create:
             self._writerequirements()
 
         self._dirstatevalidatewarned = False
 
         self._branchcaches = {}
         self._revbranchcache = None
         self._filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
         # hold sets of revision to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}
 
         # post-dirstate-status hooks
         self._postdsstatus = []
 
         # generic mapping between names and nodes
         self.names = namespaces.namespaces()
 
         # Key to signature value.
         self._sparsesignaturecache = {}
         # Signature to cached matcher instance.
         self._sparsematchercache = {}
 
     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
         def checkvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if (repo is None
                 or not util.safehasattr(repo, '_wlockref')
                 or not util.safehasattr(repo, '_lockref')):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.path):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.path) + 1:]
             if path.startswith('cache/'):
                 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
             if path.startswith('journal.'):
                 # journal is covered by 'lock'
                 if repo._currentlock(repo._lockref) is None:
                     repo.ui.develwarn('write with no lock: "%s"' % path,
                                       stacklevel=2, config='check-locks')
             elif repo._currentlock(repo._wlockref) is None:
                 # rest of vfs files are covered by 'wlock'
                 #
                 # exclude special files
                 for prefix in self._wlockfreeprefix:
                     if path.startswith(prefix):
                         return
                 repo.ui.develwarn('write with no wlock: "%s"' % path,
                                   stacklevel=2, config='check-locks')
             return ret
         return checkvfs
 
     def _getsvfsward(self, origfunc):
         """build a ward for self.svfs"""
         rref = weakref.ref(self)
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if repo is None or not util.safehasattr(repo, '_lockref'):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.sharedpath):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.sharedpath) + 1:]
             if repo._currentlock(repo._lockref) is None:
                 repo.ui.develwarn('write with no lock: "%s"' % path,
                                   stacklevel=3)
             return ret
         return checksvfs
 
     def close(self):
         self._writecaches()
 
     def _loadextensions(self):
         extensions.loadall(self.ui)
 
     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()
 
     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise'):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                               role='client'))
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps
 
     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                  if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         deltabothparents = self.ui.configbool('storage',
             'revlog.optimize-delta-parent-choice')
         self.svfs.options['deltabothparents'] = deltabothparents
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
         chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
         if 0 <= chainspan:
             self.svfs.options['maxdeltachainspan'] = chainspan
         mmapindexthreshold = self.ui.configbytes('experimental',
                                                  'mmapindexthreshold')
         if mmapindexthreshold is not None:
             self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
         withsparseread = self.ui.configbool('experimental', 'sparse-read')
         srdensitythres = float(self.ui.config('experimental',
                                               'sparse-read.density-threshold'))
         srmingapsize = self.ui.configbytes('experimental',
                                            'sparse-read.min-gap-size')
         self.svfs.options['with-sparse-read'] = withsparseread
         self.svfs.options['sparse-read-density-threshold'] = srdensitythres
         self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
         sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
         self.svfs.options['sparse-revlog'] = sparserevlog
         if sparserevlog:
             self.svfs.options['generaldelta'] = True
 
         for r in self.requirements:
             if r.startswith('exp-compression-'):
                 self.svfs.options['compengine'] = r[len('exp-compression-'):]
 
         # TODO move "revlogv2" to openerreqs once finalized.
         if REVLOGV2_REQUIREMENT in self.requirements:
             self.svfs.options['revlogv2'] = True
 
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)
 
+    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
+    # self -> auditor -> self._checknested -> self
+
+    @property
+    def auditor(self):
+        # This is only used by context.workingctx.match in order to
+        # detect files in subrepos.
+        return pathutil.pathauditor(self.root, callback=self._checknested)
+
+    @property
+    def nofsauditor(self):
+        # This is only used by context.basectx.match in order to detect
+        # files in subrepos.
+        return pathutil.pathauditor(self.root, callback=self._checknested,
+                                    realfs=False, cached=True)
+
712 def _checknested(self, path):
720 def _checknested(self, path):
713 """Determine if path is a legal nested repository."""
721 """Determine if path is a legal nested repository."""
714 if not path.startswith(self.root):
722 if not path.startswith(self.root):
715 return False
723 return False
716 subpath = path[len(self.root) + 1:]
724 subpath = path[len(self.root) + 1:]
717 normsubpath = util.pconvert(subpath)
725 normsubpath = util.pconvert(subpath)
718
726
719 # XXX: Checking against the current working copy is wrong in
727 # XXX: Checking against the current working copy is wrong in
720 # the sense that it can reject things like
728 # the sense that it can reject things like
721 #
729 #
722 # $ hg cat -r 10 sub/x.txt
730 # $ hg cat -r 10 sub/x.txt
723 #
731 #
724 # if sub/ is no longer a subrepository in the working copy
732 # if sub/ is no longer a subrepository in the working copy
725 # parent revision.
733 # parent revision.
726 #
734 #
727 # However, it can of course also allow things that would have
735 # However, it can of course also allow things that would have
728 # been rejected before, such as the above cat command if sub/
736 # been rejected before, such as the above cat command if sub/
729 # is a subrepository now, but was a normal directory before.
737 # is a subrepository now, but was a normal directory before.
730 # The old path auditor would have rejected by mistake since it
738 # The old path auditor would have rejected by mistake since it
731 # panics when it sees sub/.hg/.
739 # panics when it sees sub/.hg/.
732 #
740 #
733 # All in all, checking against the working copy seems sensible
741 # All in all, checking against the working copy seems sensible
734 # since we want to prevent access to nested repositories on
742 # since we want to prevent access to nested repositories on
735 # the filesystem *now*.
743 # the filesystem *now*.
736 ctx = self[None]
744 ctx = self[None]
737 parts = util.splitpath(subpath)
745 parts = util.splitpath(subpath)
738 while parts:
746 while parts:
739 prefix = '/'.join(parts)
747 prefix = '/'.join(parts)
740 if prefix in ctx.substate:
748 if prefix in ctx.substate:
741 if prefix == normsubpath:
749 if prefix == normsubpath:
742 return True
750 return True
743 else:
751 else:
744 sub = ctx.sub(prefix)
752 sub = ctx.sub(prefix)
745 return sub.checknested(subpath[len(prefix) + 1:])
753 return sub.checknested(subpath[len(prefix) + 1:])
746 else:
754 else:
747 parts.pop()
755 parts.pop()
748 return False
756 return False
749
757
750 def peer(self):
758 def peer(self):
751 return localpeer(self) # not cached to avoid reference cycle
759 return localpeer(self) # not cached to avoid reference cycle
752
760
753 def unfiltered(self):
761 def unfiltered(self):
754 """Return unfiltered version of the repository
762 """Return unfiltered version of the repository
755
763
756 Intended to be overwritten by filtered repo."""
764 Intended to be overwritten by filtered repo."""
757 return self
765 return self
758
766
759 def filtered(self, name, visibilityexceptions=None):
767 def filtered(self, name, visibilityexceptions=None):
760 """Return a filtered version of a repository"""
768 """Return a filtered version of a repository"""
761 cls = repoview.newtype(self.unfiltered().__class__)
769 cls = repoview.newtype(self.unfiltered().__class__)
762 return cls(self, name, visibilityexceptions)
770 return cls(self, name, visibilityexceptions)
763
771
764 @repofilecache('bookmarks', 'bookmarks.current')
772 @repofilecache('bookmarks', 'bookmarks.current')
765 def _bookmarks(self):
773 def _bookmarks(self):
766 return bookmarks.bmstore(self)
774 return bookmarks.bmstore(self)
767
775
768 @property
776 @property
769 def _activebookmark(self):
777 def _activebookmark(self):
770 return self._bookmarks.active
778 return self._bookmarks.active
771
779
772 # _phasesets depend on changelog. what we need is to call
780 # _phasesets depend on changelog. what we need is to call
773 # _phasecache.invalidate() if '00changelog.i' was changed, but it
781 # _phasecache.invalidate() if '00changelog.i' was changed, but it
774 # can't be easily expressed in filecache mechanism.
782 # can't be easily expressed in filecache mechanism.
775 @storecache('phaseroots', '00changelog.i')
783 @storecache('phaseroots', '00changelog.i')
776 def _phasecache(self):
784 def _phasecache(self):
777 return phases.phasecache(self, self._phasedefaults)
785 return phases.phasecache(self, self._phasedefaults)
778
786
779 @storecache('obsstore')
787 @storecache('obsstore')
780 def obsstore(self):
788 def obsstore(self):
781 return obsolete.makestore(self.ui, self)
789 return obsolete.makestore(self.ui, self)
782
790
783 @storecache('00changelog.i')
791 @storecache('00changelog.i')
784 def changelog(self):
792 def changelog(self):
785 return changelog.changelog(self.svfs,
793 return changelog.changelog(self.svfs,
786 trypending=txnutil.mayhavepending(self.root))
794 trypending=txnutil.mayhavepending(self.root))
787
795
788 def _constructmanifest(self):
796 def _constructmanifest(self):
789 # This is a temporary function while we migrate from manifest to
797 # This is a temporary function while we migrate from manifest to
790 # manifestlog. It allows bundlerepo and unionrepo to intercept the
798 # manifestlog. It allows bundlerepo and unionrepo to intercept the
791 # manifest creation.
799 # manifest creation.
792 return manifest.manifestrevlog(self.svfs)
800 return manifest.manifestrevlog(self.svfs)
793
801
794 @storecache('00manifest.i')
802 @storecache('00manifest.i')
795 def manifestlog(self):
803 def manifestlog(self):
796 return manifest.manifestlog(self.svfs, self)
804 return manifest.manifestlog(self.svfs, self)
797
805
798 @repofilecache('dirstate')
806 @repofilecache('dirstate')
799 def dirstate(self):
807 def dirstate(self):
800 return self._makedirstate()
808 return self._makedirstate()
801
809
802 def _makedirstate(self):
810 def _makedirstate(self):
803 """Extension point for wrapping the dirstate per-repo."""
811 """Extension point for wrapping the dirstate per-repo."""
804 sparsematchfn = lambda: sparse.matcher(self)
812 sparsematchfn = lambda: sparse.matcher(self)
805
813
806 return dirstate.dirstate(self.vfs, self.ui, self.root,
814 return dirstate.dirstate(self.vfs, self.ui, self.root,
807 self._dirstatevalidate, sparsematchfn)
815 self._dirstatevalidate, sparsematchfn)
808
816
809 def _dirstatevalidate(self, node):
817 def _dirstatevalidate(self, node):
810 try:
818 try:
811 self.changelog.rev(node)
819 self.changelog.rev(node)
812 return node
820 return node
813 except error.LookupError:
821 except error.LookupError:
814 if not self._dirstatevalidatewarned:
822 if not self._dirstatevalidatewarned:
815 self._dirstatevalidatewarned = True
823 self._dirstatevalidatewarned = True
816 self.ui.warn(_("warning: ignoring unknown"
824 self.ui.warn(_("warning: ignoring unknown"
817 " working parent %s!\n") % short(node))
825 " working parent %s!\n") % short(node))
818 return nullid
826 return nullid
819
827
820 @storecache(narrowspec.FILENAME)
828 @storecache(narrowspec.FILENAME)
821 def narrowpats(self):
829 def narrowpats(self):
822 """matcher patterns for this repository's narrowspec
830 """matcher patterns for this repository's narrowspec
823
831
824 A tuple of (includes, excludes).
832 A tuple of (includes, excludes).
825 """
833 """
826 source = self
834 source = self
827 if self.shared():
835 if self.shared():
828 from . import hg
836 from . import hg
829 source = hg.sharedreposource(self)
837 source = hg.sharedreposource(self)
830 return narrowspec.load(source)
838 return narrowspec.load(source)
831
839
832 @storecache(narrowspec.FILENAME)
840 @storecache(narrowspec.FILENAME)
833 def _narrowmatch(self):
841 def _narrowmatch(self):
834 if repository.NARROW_REQUIREMENT not in self.requirements:
842 if repository.NARROW_REQUIREMENT not in self.requirements:
835 return matchmod.always(self.root, '')
843 return matchmod.always(self.root, '')
836 include, exclude = self.narrowpats
844 include, exclude = self.narrowpats
837 return narrowspec.match(self.root, include=include, exclude=exclude)
845 return narrowspec.match(self.root, include=include, exclude=exclude)
838
846
839 # TODO(martinvonz): make this property-like instead?
847 # TODO(martinvonz): make this property-like instead?
840 def narrowmatch(self):
848 def narrowmatch(self):
841 return self._narrowmatch
849 return self._narrowmatch
842
850
843 def setnarrowpats(self, newincludes, newexcludes):
851 def setnarrowpats(self, newincludes, newexcludes):
844 target = self
852 target = self
845 if self.shared():
853 if self.shared():
846 from . import hg
854 from . import hg
847 target = hg.sharedreposource(self)
855 target = hg.sharedreposource(self)
848 narrowspec.save(target, newincludes, newexcludes)
856 narrowspec.save(target, newincludes, newexcludes)
849 self.invalidate(clearfilecache=True)
857 self.invalidate(clearfilecache=True)
850
858
851 def __getitem__(self, changeid):
859 def __getitem__(self, changeid):
852 if changeid is None:
860 if changeid is None:
853 return context.workingctx(self)
861 return context.workingctx(self)
854 if isinstance(changeid, context.basectx):
862 if isinstance(changeid, context.basectx):
855 return changeid
863 return changeid
856 if isinstance(changeid, slice):
864 if isinstance(changeid, slice):
857 # wdirrev isn't contiguous so the slice shouldn't include it
865 # wdirrev isn't contiguous so the slice shouldn't include it
858 return [context.changectx(self, i)
866 return [context.changectx(self, i)
859 for i in pycompat.xrange(*changeid.indices(len(self)))
867 for i in pycompat.xrange(*changeid.indices(len(self)))
860 if i not in self.changelog.filteredrevs]
868 if i not in self.changelog.filteredrevs]
861 try:
869 try:
862 return context.changectx(self, changeid)
870 return context.changectx(self, changeid)
863 except error.WdirUnsupported:
871 except error.WdirUnsupported:
864 return context.workingctx(self)
872 return context.workingctx(self)
865
873
866 def __contains__(self, changeid):
874 def __contains__(self, changeid):
867 """True if the given changeid exists
875 """True if the given changeid exists
868
876
869 error.AmbiguousPrefixLookupError is raised if an ambiguous node
877 error.AmbiguousPrefixLookupError is raised if an ambiguous node
870 specified.
878 specified.
871 """
879 """
872 try:
880 try:
873 self[changeid]
881 self[changeid]
874 return True
882 return True
875 except error.RepoLookupError:
883 except error.RepoLookupError:
876 return False
884 return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
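
    # Usage sketch: the %-formatting escapes arguments safely, e.g. %d for
    # an int, %s for a string, %ld for a list of ints (see
    # revsetlang.formatspec for the full table):
    #
    #   for rev in repo.revs('ancestors(%d) and user(%s)', 42, 'alice'):
    #       ...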

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
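
    # Usage sketch: resolve several revsets at once while locally
    # overriding the user's "stable" alias (the names are illustrative):
    #
    #   revs = repo.anyrevs(['tip', 'stable'], user=True,
    #                       localalias={'stable': 'branch(default)'})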

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
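
    # Usage sketch: fire a hypothetical custom hook, aborting the caller
    # if a configured handler fails ('myext-prepush' is illustrative):
    #
    #   repo.hook('myext-prepush', throw=True, source='push')
    #
    # Keyword arguments are exposed to shell hooks as HG_* environment
    # variables.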

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
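
    # Configuration sketch: the patterns loaded here come from the [encode]
    # and [decode] hgrc sections, mapping file patterns to filter commands:
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
    #
    # 'dos2unix'/'unix2dos' are illustrative external commands, not
    # built-ins.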

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
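        #
        # A hedged parsing sketch for hook code reading that file (the
        # helper name is illustrative):
        #
        #   def readtagmoves(repo):
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, node, name = line.rstrip('\n').split(' ', 2)
        #               yield action, node, name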
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook
            run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
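
    # Usage sketch: callers open a transaction under the store lock and use
    # it as a context manager ('example' is an illustrative description):
    #
    #   with repo.lock():
    #       with repo.transaction('example') as tr:
    #           ...  # mutate the store; closed on success, aborted on error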

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1659
1667
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

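    # Illustrative sketch (not part of this module): code that must run only
    # once every lock is dropped can register a callback via _afterlock; if
    # no lock is currently held, the callback fires immediately. 'repo' here
    # stands for a localrepository instance (hypothetical example):
    #
    #   def _cleanup():
    #       repo.ui.debug('all locks released\n')
    #   repo._afterlock(_cleanup)
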
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

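    # Illustrative sketch (not part of this module): callers needing both
    # locks take 'wlock' before 'lock', per the ordering rule documented
    # above. Assuming lock objects are used as context managers, a caller
    # might write:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('example') as tr:
    #               ...  # mutate store and working-dir state under both locks
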
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

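    # Illustrative sketch (not part of this module): after a rename, the
    # committed filelog revision carries the copy source in its metadata and
    # a null first parent, telling readers to "look up the copy data", e.g.
    # (hypothetical values):
    #
    #   meta = {"copy": "foo", "copyrev": hex(crev)}
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)
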
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

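    # Illustrative sketch (not part of this module): committing the working
    # directory through this API, where 'repo' is a localrepository instance
    # and the message/user are hypothetical values:
    #
    #   node = repo.commit(text="fix parser bug",
    #                      user="alice <alice@example.com>")
    #   if node is None:
    #       repo.ui.status("nothing changed\n")
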
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop)
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changeset.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

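    # Illustrative sketch (not part of this module): an extension hooking
    # into dirstate status fixups might register a callback like this
    # (hypothetical names):
    #
    #   def _poststatus(wctx, status):
    #       # runs under wlock, right where status fixups happen
    #       wctx.repo().ui.debug('%d modified\n' % len(status.modified))
    #   repo.addpostdsstatus(_poststatus)
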
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

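    # Illustrative sketch (not part of this module): printing the open heads
    # of the 'default' branch, newest first ('short' is imported at the top
    # of this module; hedged example):
    #
    #   for node in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(node))
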
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, sampling nodes at
            # exponentially growing distances (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

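    # Illustrative sketch (not part of this module): the pushkey protocol is
    # how data such as bookmarks and phases travels over the wire. A hedged
    # example of reading and updating the 'bookmarks' namespace, with
    # hypothetical bookmark name and hex values:
    #
    #   marks = repo.listkeys('bookmarks')          # {name: hex node}
    #   repo.pushkey('bookmarks', 'stable',
    #                marks.get('stable', ''), newhex)
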
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

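# For example, undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots', mapping each journal file to its undo
# counterpart.
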
def instance(ui, path, create, intents=None):
    return localrepository(ui, util.urllocalpath(path), create,
                           intents=intents)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if repo.ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    return requirements
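
# Illustrative sketch (not part of this module): an extension adding a custom
# requirement by wrapping newreporequirements (hypothetical requirement name):
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       reqs.add('exp-myextension')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)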