localrepo: fix a mismatched arg name in createrepository() docstring...

Martin von Zweigbergk
r39616:a64a965b default
@@ -1,2505 +1,2505 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

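# Example (sketch): a property cached through these decorators is recomputed
# only when its backing file changes, as in the later use:
#
#     @storecache('00changelog.i')
#     def changelog(self):
#         ...  # reloaded when .hg/store/00changelog.i changes
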
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
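
# Example (sketch): callers can peek at a cached property without forcing
# it to be computed:
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         ...  # use obj without triggering a read from disk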

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
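
# Example (sketch): decorating a repo method so it always operates on the
# unfiltered repository, even when called on a filtered view:
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # here 'self' is repo.unfiltered()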

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True
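
# Example (sketch): the executor is used as a context manager; for a local
# peer the returned futures are already resolved:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('heads', {})
#         e.sendcommands()
#     heads = f.result()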

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire-level function happier. We need to build a proper object
            # from it in the local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

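# Example (sketch): callers normally obtain this class via
# localrepository.peer() rather than instantiating it directly:
#
#     peer = repo.peer()
#     caps = peer.capabilities()
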
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
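
# Example (sketch): an extension can declare support for a custom requirement
# from its uisetup; 'exp-myfeature' is a hypothetical requirement name:
#
#     def featuresetup(ui, features):
#         features.add('exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)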

@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()`` or ``localrepo.instance()``
        for obtaining a new repository object.
        """

        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            try:
                self.vfs.stat()
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise
            raise error.RepoError(_("repository %s not found") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
                                              'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True
        maxchainlen = None
        if sparserevlog:
            maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
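
    # Example (sketch): a filtered view hides some revisions behind a named
    # filter; localpeer above uses the 'served' view:
    #
    #     served = repo.filtered('served')  # view suitable for serving
    #     unfi = repo.unfiltered()          # all revisions, no filtering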

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
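
    # Example (sketch): __getitem__ accepts several kinds of changeid:
    #
    #     repo[None]    # workingctx for the working directory
    #     repo[0]       # changectx for revision 0
    #     repo[0:5]     # list of changectxs, filtered revisions skipped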

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
903
903
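    # Sketch (assumed variable names): %-formatting escapes values so user
    # input cannot be misparsed as revset syntax.
    #
    #   merges = repo.revs('merge() and branch(%s)', branchname)
    #   subset = repo.revs('%ld and not public()', [3, 5, 8])  # %ld: int list
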
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

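    # Sketch (hypothetical alias name): expand user aliases while pinning a
    # local definition of 'release'.
    #
    #   revs = repo.anyrevs(['release() and not obsolete()'], user=True,
    #                       localalias={'release': 'tag("re:^v")'})
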
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

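    # Sketch (hypothetical hook name): an extension firing its own hook;
    # throw=True aborts the current operation if the hook fails.
    #
    #   repo.hook('myext-preaction', throw=True, node=hex(newnode))
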
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

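    # Sketch of the tag-related API (assuming an existing `repo`):
    #
    #   repo.tags()           # {tagname: node}, always includes 'tip'
    #   repo.tagtype('v1.0')  # 'global', 'local', or None
    #   repo.tagslist()       # [(tagname, node)] ordered by revision
    #   repo.nodetags(node)   # sorted tag names pointing at node
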
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

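    # Sketch: walking named branches and their heads (assuming `repo`); heads
    # are ordered by increasing revision, so the last entry is the newest.
    #
    #   for branch, heads in sorted(repo.branchmap().iteritems()):
    #       tiphead = heads[-1]
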
    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

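    # Sketch of the hgrc configuration these filters consume (hypothetical
    # patterns): the section name matches the filter name passed to
    # _loadfilter, keys are file patterns, and values are either a registered
    # data filter or a shell command (optionally prefixed, e.g. "pipe:").
    #
    #   [encode]
    #   **.gz = pipe: gunzip
    #
    #   [decode]
    #   **.gz = pipe: gzip
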
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

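    # Sketch: ``flags`` mirrors manifest flags, so 'l' writes a symlink and
    # 'x' sets the executable bit (assuming an existing `repo`).
    #
    #   repo.wwrite('bin/run.sh', script, 'x')       # executable file
    #   repo.wwrite('current', 'target/path', 'l')   # symlink
    #   repo.wwrite('README', text, '')              # regular file
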
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

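    # Sketch of the expected calling convention: the store lock must be held
    # (enforced below under devel.check-locks), and the returned transaction
    # works as a context manager.
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # store writes; finalized on success, aborted on error
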
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track tag movement from a code perspective. So we fall back
        # to tracking at the repository level. One could envision tracking
        # changes to the '.hgtags' file through changegroup application, but
        # that fails to cope with cases where a transaction exposes new heads
        # without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
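        #
        # For example, moving tag "v1.0" records two lines, the old value
        # followed by the new one::
        #
        #   -M <old-hex-node> v1.0
        #   +M <new-hex-node> v1.0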
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we do it only once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

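    # Sketch: a full cache warm-up outside any transaction, similar to what a
    # debug command might do after a clone (the lock usage here is an
    # assumption, not dictated by this method):
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
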
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

1697 @unfilteredmethod
1697 @unfilteredmethod
1698 def _refreshfilecachestats(self, tr):
1698 def _refreshfilecachestats(self, tr):
1699 """Reload stats of cached files so that they are flagged as valid"""
1699 """Reload stats of cached files so that they are flagged as valid"""
1700 for k, ce in self._filecache.items():
1700 for k, ce in self._filecache.items():
1701 k = pycompat.sysstr(k)
1701 k = pycompat.sysstr(k)
1702 if k == r'dirstate' or k not in self.__dict__:
1702 if k == r'dirstate' or k not in self.__dict__:
1703 continue
1703 continue
1704 ce.refresh()
1704 ce.refresh()
1705
1705
1706 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1706 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1707 inheritchecker=None, parentenvvar=None):
1707 inheritchecker=None, parentenvvar=None):
1708 parentlock = None
1708 parentlock = None
1709 # the contents of parentenvvar are used by the underlying lock to
1709 # the contents of parentenvvar are used by the underlying lock to
1710 # determine whether it can be inherited
1710 # determine whether it can be inherited
1711 if parentenvvar is not None:
1711 if parentenvvar is not None:
1712 parentlock = encoding.environ.get(parentenvvar)
1712 parentlock = encoding.environ.get(parentenvvar)
1713
1713
1714 timeout = 0
1714 timeout = 0
1715 warntimeout = 0
1715 warntimeout = 0
1716 if wait:
1716 if wait:
1717 timeout = self.ui.configint("ui", "timeout")
1717 timeout = self.ui.configint("ui", "timeout")
1718 warntimeout = self.ui.configint("ui", "timeout.warn")
1718 warntimeout = self.ui.configint("ui", "timeout.warn")
1719 # internal config: ui.signal-safe-lock
1719 # internal config: ui.signal-safe-lock
1720 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1720 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1721
1721
1722 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1722 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1723 releasefn=releasefn,
1723 releasefn=releasefn,
1724 acquirefn=acquirefn, desc=desc,
1724 acquirefn=acquirefn, desc=desc,
1725 inheritchecker=inheritchecker,
1725 inheritchecker=inheritchecker,
1726 parentlock=parentlock,
1726 parentlock=parentlock,
1727 signalsafe=signalsafe)
1727 signalsafe=signalsafe)
1728 return l
1728 return l
1729
1729
1730 def _afterlock(self, callback):
1730 def _afterlock(self, callback):
1731 """add a callback to be run when the repository is fully unlocked
1731 """add a callback to be run when the repository is fully unlocked
1732
1732
1733 The callback will be executed when the outermost lock is released
1733 The callback will be executed when the outermost lock is released
1734 (with wlock being higher level than 'lock')."""
1734 (with wlock being higher level than 'lock')."""
1735 for ref in (self._wlockref, self._lockref):
1735 for ref in (self._wlockref, self._lockref):
1736 l = ref and ref()
1736 l = ref and ref()
1737 if l and l.held:
1737 if l and l.held:
1738 l.postrelease.append(callback)
1738 l.postrelease.append(callback)
1739 break
1739 break
1740         else: # no lock has been found.
1741             callback()
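    # Editorial sketch (hypothetical callback, not part of this module):
    # deferring work until the repository is fully unlocked. The callback
    # runs when the outermost lock is released, or immediately if no lock
    # is currently held:
    #
    #     def notifydone():
    #         repo.ui.status('all locks released\n')
    #
    #     repo._afterlock(notifydone)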
1742
1743     def lock(self, wait=True):
1744         '''Lock the repository store (.hg/store) and return a weak reference
1745         to the lock. Use this before modifying the store (e.g. committing or
1746         stripping). If you are opening a transaction, get a lock as well.
1747
1748         If both 'lock' and 'wlock' must be acquired, ensure you always
1749         acquire 'wlock' first to avoid a deadlock hazard.'''
1750         l = self._currentlock(self._lockref)
1751         if l is not None:
1752             l.lock()
1753             return l
1754
1755         l = self._lock(self.svfs, "lock", wait, None,
1756                        self.invalidate, _('repository %s') % self.origroot)
1757         self._lockref = weakref.ref(l)
1758         return l
1759
1760     def _wlockchecktransaction(self):
1761         if self.currenttransaction() is not None:
1762             raise error.LockInheritanceContractViolation(
1763                 'wlock cannot be inherited in the middle of a transaction')
1764
1765     def wlock(self, wait=True):
1766         '''Lock the non-store parts of the repository (everything under
1767         .hg except .hg/store) and return a weak reference to the lock.
1768
1769         Use this before modifying files in .hg.
1770
1771         If both 'lock' and 'wlock' must be acquired, ensure you always
1772         acquire 'wlock' first to avoid a deadlock hazard.'''
1773         l = self._wlockref and self._wlockref()
1774         if l is not None and l.held:
1775             l.lock()
1776             return l
1777
1778         # We do not need to check for non-waiting lock acquisitions. Such
1779         # acquisitions would not cause a deadlock; they would just fail.
1780         if wait and (self.ui.configbool('devel', 'all-warnings')
1781                      or self.ui.configbool('devel', 'check-locks')):
1782             if self._currentlock(self._lockref) is not None:
1783                 self.ui.develwarn('"wlock" acquired after "lock"')
1784
1785         def unlock():
1786             if self.dirstate.pendingparentchange():
1787                 self.dirstate.invalidate()
1788             else:
1789                 self.dirstate.write(None)
1790
1791             self._filecache['dirstate'].refresh()
1792
1793         l = self._lock(self.vfs, "wlock", wait, unlock,
1794                        self.invalidatedirstate, _('working directory of %s') %
1795                        self.origroot,
1796                        inheritchecker=self._wlockchecktransaction,
1797                        parentenvvar='HG_WLOCK_LOCKER')
1798         self._wlockref = weakref.ref(l)
1799         return l
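    # Editorial sketch (hypothetical caller, not part of this module): the
    # documented ordering is 'wlock' before 'lock', released in reverse,
    # mirroring what commit() below does:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...  # modify the store and working directory here
    #     finally:
    #         lockmod.release(lock, wlock)  # tolerates None entries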
1800
1801     def _currentlock(self, lockref):
1802         """Returns the lock if it's held, or None if it's not."""
1803         if lockref is None:
1804             return None
1805         l = lockref()
1806         if l is None or not l.held:
1807             return None
1808         return l
1809
1810     def currentwlock(self):
1811         """Returns the wlock if it's held, or None if it's not."""
1812         return self._currentlock(self._wlockref)
1813
1814     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1815         """
1816         commit an individual file as part of a larger transaction
1817         """
1818
1819         fname = fctx.path()
1820         fparent1 = manifest1.get(fname, nullid)
1821         fparent2 = manifest2.get(fname, nullid)
1822         if isinstance(fctx, context.filectx):
1823             node = fctx.filenode()
1824             if node in [fparent1, fparent2]:
1825                 self.ui.debug('reusing %s filelog entry\n' % fname)
1826                 if manifest1.flags(fname) != fctx.flags():
1827                     changelist.append(fname)
1828                 return node
1829
1830         flog = self.file(fname)
1831         meta = {}
1832         copy = fctx.renamed()
1833         if copy and copy[0] != fname:
1834             # Mark the new revision of this file as a copy of another
1835             # file. This copy data will effectively act as a parent
1836             # of this new revision. If this is a merge, the first
1837             # parent will be the nullid (meaning "look up the copy data")
1838             # and the second one will be the other parent. For example:
1839             #
1840             # 0 --- 1 --- 3   rev1 changes file foo
1841             #   \       /     rev2 renames foo to bar and changes it
1842             #    \- 2 -/      rev3 should have bar with all changes and
1843             #                      should record that bar descends from
1844             #                      bar in rev2 and foo in rev1
1845             #
1846             # this allows this merge to succeed:
1847             #
1848             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
1849             #   \       /     merging rev3 and rev4 should use bar@rev2
1850             #    \- 2 --- 4   as the merge base
1851             #
1852
1853             cfname = copy[0]
1854             crev = manifest1.get(cfname)
1855             newfparent = fparent2
1856
1857             if manifest2: # branch merge
1858                 if fparent2 == nullid or crev is None: # copied on remote side
1859                     if cfname in manifest2:
1860                         crev = manifest2[cfname]
1861                         newfparent = fparent1
1862
1863             # Here, we used to search backwards through history to try to find
1864             # where the file copy came from if the source of a copy was not in
1865             # the parent directory. However, this doesn't actually make sense to
1866             # do (what does a copy from something not in your working copy even
1867             # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1868             # the user that copy information was dropped, so if they didn't
1869             # expect this outcome it can be fixed, but this is the correct
1870             # behavior in this circumstance.
1871
1872             if crev:
1873                 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1874                 meta["copy"] = cfname
1875                 meta["copyrev"] = hex(crev)
1876                 fparent1, fparent2 = nullid, newfparent
1877             else:
1878                 self.ui.warn(_("warning: can't find ancestor for '%s' "
1879                                "copied from '%s'!\n") % (fname, cfname))
1880
1881         elif fparent1 == nullid:
1882             fparent1, fparent2 = fparent2, nullid
1883         elif fparent2 != nullid:
1884             # is one parent an ancestor of the other?
1885             fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1886             if fparent1 in fparentancestors:
1887                 fparent1, fparent2 = fparent2, nullid
1888             elif fparent2 in fparentancestors:
1889                 fparent2 = nullid
1890
1891         # is the file changed?
1892         text = fctx.data()
1893         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1894             changelist.append(fname)
1895             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1896         # are just the flags changed during merge?
1897         elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1898             changelist.append(fname)
1899
1900         return fparent1
1901
1902     def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1903         """check for commit arguments that aren't committable"""
1904         if match.isexact() or match.prefix():
1905             matched = set(status.modified + status.added + status.removed)
1906
1907             for f in match.files():
1908                 f = self.dirstate.normalize(f)
1909                 if f == '.' or f in matched or f in wctx.substate:
1910                     continue
1911                 if f in status.deleted:
1912                     fail(f, _('file not found!'))
1913                 if f in vdirs: # visited directory
1914                     d = f + '/'
1915                     for mf in matched:
1916                         if mf.startswith(d):
1917                             break
1918                     else:
1919                         fail(f, _("no match under directory!"))
1920                 elif f not in self.dirstate:
1921                     fail(f, _("file not tracked!"))
1922
1923     @unfilteredmethod
1924     def commit(self, text="", user=None, date=None, match=None, force=False,
1925                editor=False, extra=None):
1926 """Add a new revision to current repository.
1926 """Add a new revision to current repository.
1927
1927
1928 Revision information is gathered from the working directory,
1928 Revision information is gathered from the working directory,
1929 match can be used to filter the committed files. If editor is
1929 match can be used to filter the committed files. If editor is
1930 supplied, it is called to get a commit message.
1930 supplied, it is called to get a commit message.
1931 """
1931 """
1932         if extra is None:
1933             extra = {}
1934
1935         def fail(f, msg):
1936             raise error.Abort('%s: %s' % (f, msg))
1937
1938         if not match:
1939             match = matchmod.always(self.root, '')
1940
1941         if not force:
1942             vdirs = []
1943             match.explicitdir = vdirs.append
1944             match.bad = fail
1945
1946         wlock = lock = tr = None
1947         try:
1948             wlock = self.wlock()
1949             lock = self.lock() # for recent changelog (see issue4368)
1950
1951             wctx = self[None]
1952             merge = len(wctx.parents()) > 1
1953
1954             if not force and merge and not match.always():
1955                 raise error.Abort(_('cannot partially commit a merge '
1956                                     '(do not specify files or patterns)'))
1957
1958             status = self.status(match=match, clean=force)
1959             if force:
1960                 status.modified.extend(status.clean) # mq may commit clean files
1961
1962             # check subrepos
1963             subs, commitsubs, newstate = subrepoutil.precommit(
1964                 self.ui, wctx, status, match, force=force)
1965
1966             # make sure all explicit patterns are matched
1967             if not force:
1968                 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1969
1970             cctx = context.workingcommitctx(self, status,
1971                                             text, user, date, extra)
1972
1973             # internal config: ui.allowemptycommit
1974             allowemptycommit = (wctx.branch() != wctx.p1().branch()
1975                                 or extra.get('close') or merge or cctx.files()
1976                                 or self.ui.configbool('ui', 'allowemptycommit'))
1977             if not allowemptycommit:
1978                 return None
1979
1980             if merge and cctx.deleted():
1981                 raise error.Abort(_("cannot commit merge with missing files"))
1982
1983             ms = mergemod.mergestate.read(self)
1984             mergeutil.checkunresolved(ms)
1985
1986             if editor:
1987                 cctx._text = editor(self, cctx, subs)
1988             edited = (text != cctx._text)
1989
1990             # Save commit message in case this transaction gets rolled back
1991             # (e.g. by a pretxncommit hook). Leave the content alone on
1992             # the assumption that the user will use the same editor again.
1993             msgfn = self.savecommitmessage(cctx._text)
1994
1995             # commit subs and write new state
1996             if subs:
1997                 for s in sorted(commitsubs):
1998                     sub = wctx.sub(s)
1999                     self.ui.status(_('committing subrepository %s\n') %
2000                                    subrepoutil.subrelpath(sub))
2001                     sr = sub.commit(cctx._text, user, date)
2002                     newstate[s] = (newstate[s][0], sr)
2003                 subrepoutil.writestate(self, newstate)
2004
2005             p1, p2 = self.dirstate.parents()
2006             hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2007             try:
2008                 self.hook("precommit", throw=True, parent1=hookp1,
2009                           parent2=hookp2)
2010                 tr = self.transaction('commit')
2011                 ret = self.commitctx(cctx, True)
2012             except: # re-raises
2013                 if edited:
2014                     self.ui.write(
2015                         _('note: commit message saved in %s\n') % msgfn)
2016                 raise
2017             # update bookmarks, dirstate and mergestate
2018             bookmarks.update(self, [p1, p2], ret)
2019             cctx.markcommitted(ret)
2020             ms.reset()
2021             tr.close()
2022
2023         finally:
2024             lockmod.release(tr, lock, wlock)
2025
2026         def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2027             # hack for commands that use a temporary commit (e.g. histedit):
2028             # the temporary commit may get stripped before the hook is run
2029             if self.changelog.hasnode(ret):
2030                 self.hook("commit", node=node, parent1=parent1,
2031                           parent2=parent2)
2032         self._afterlock(commithook)
2033         return ret
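    # Editorial sketch (assumed values): committing all working-directory
    # changes programmatically. commit() returns the new node, or None when
    # nothing changed and empty commits are not allowed:
    #
    #     node = repo.commit(text="fix parser crash",
    #                        user="alice <alice@example.org>")
    #     if node is None:
    #         repo.ui.status('nothing changed\n')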
2034
2035     @unfilteredmethod
2036     def commitctx(self, ctx, error=False):
2037         """Add a new revision to current repository.
2038         Revision information is passed via the context argument.
2039
2040         ctx.files() should list all files involved in this commit, i.e.
2041         modified/added/removed files. On merge, it may be wider than the
2042         set of files to be committed, since any file nodes derived directly
2043         from p1 or p2 are excluded from the committed ctx.files().
2044         """
2045
2046         tr = None
2047         p1, p2 = ctx.p1(), ctx.p2()
2048         user = ctx.user()
2049
2050         lock = self.lock()
2051         try:
2052             tr = self.transaction("commit")
2053             trp = weakref.proxy(tr)
2054
2055             if ctx.manifestnode():
2056                 # reuse an existing manifest revision
2057                 self.ui.debug('reusing known manifest\n')
2058                 mn = ctx.manifestnode()
2059                 files = ctx.files()
2060             elif ctx.files():
2061                 m1ctx = p1.manifestctx()
2062                 m2ctx = p2.manifestctx()
2063                 mctx = m1ctx.copy()
2064
2065                 m = mctx.read()
2066                 m1 = m1ctx.read()
2067                 m2 = m2ctx.read()
2068
2069                 # check in files
2070                 added = []
2071                 changed = []
2072                 removed = list(ctx.removed())
2073                 linkrev = len(self)
2074                 self.ui.note(_("committing files:\n"))
2075                 for f in sorted(ctx.modified() + ctx.added()):
2076                     self.ui.note(f + "\n")
2077                     try:
2078                         fctx = ctx[f]
2079                         if fctx is None:
2080                             removed.append(f)
2081                         else:
2082                             added.append(f)
2083                             m[f] = self._filecommit(fctx, m1, m2, linkrev,
2084                                                     trp, changed)
2085                             m.setflag(f, fctx.flags())
2086                     except OSError as inst:
2087                         self.ui.warn(_("trouble committing %s!\n") % f)
2088                         raise
2089                     except IOError as inst:
2090                         errcode = getattr(inst, 'errno', errno.ENOENT)
2091                         if error or errcode and errcode != errno.ENOENT:
2092                             self.ui.warn(_("trouble committing %s!\n") % f)
2093                         raise
2094
2095                 # update manifest
2096                 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2097                 drop = [f for f in removed if f in m]
2098                 for f in drop:
2099                     del m[f]
2100                 files = changed + removed
2101                 md = None
2102                 if not files:
2103                     # if no "files" actually changed in terms of the changelog,
2104                     # try hard to detect unmodified manifest entry so that the
2105                     # exact same commit can be reproduced later on convert.
2106                     md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2107                 if not files and md:
2108                     self.ui.debug('not reusing manifest (no file change in '
2109                                   'changelog, but manifest differs)\n')
2110                 if files or md:
2111                     self.ui.note(_("committing manifest\n"))
2112                     mn = mctx.write(trp, linkrev,
2113                                     p1.manifestnode(), p2.manifestnode(),
2114                                     added, drop)
2115                 else:
2116                     self.ui.debug('reusing manifest from p1 (listed files '
2117                                   'actually unchanged)\n')
2118                     mn = p1.manifestnode()
2119             else:
2120                 self.ui.debug('reusing manifest from p1 (no file change)\n')
2121                 mn = p1.manifestnode()
2122                 files = []
2123
2124             # update changelog
2125             self.ui.note(_("committing changelog\n"))
2126             self.changelog.delayupdate(tr)
2127             n = self.changelog.add(mn, files, ctx.description(),
2128                                    trp, p1.node(), p2.node(),
2129                                    user, ctx.date(), ctx.extra().copy())
2130             xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2131             self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2132                       parent2=xp2)
2133             # set the new commit in its proper phase
2134             targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2135             if targetphase:
2136                 # retracting the phase boundary does not alter parent
2137                 # changesets; if a parent has a higher phase, the resulting
2138                 # phase will be compliant anyway
2139                 #
2140                 # if the minimal phase was 0 we don't need to retract anything
2141                 phases.registernew(self, tr, targetphase, [n])
2142             tr.close()
2143             return n
2144         finally:
2145             if tr:
2146                 tr.release()
2147             lock.release()
2148
2149     @unfilteredmethod
2150     def destroying(self):
2151         '''Inform the repository that nodes are about to be destroyed.
2152         Intended for use by strip and rollback, so there's a common
2153         place for anything that has to be done before destroying history.
2154
2155         This is mostly useful for saving state that is in memory and waiting
2156         to be flushed when the current lock is released. Because a call to
2157         destroyed is imminent, the repo will be invalidated causing those
2158         changes to stay in memory (waiting for the next unlock), or vanish
2159         completely.
2160         '''
2161         # When using the same lock to commit and strip, the phasecache is left
2162         # dirty after committing. Then when we strip, the repo is invalidated,
2163         # causing those changes to disappear.
2164         if '_phasecache' in vars(self):
2165             self._phasecache.write()
2166
2167     @unfilteredmethod
2168     def destroyed(self):
2169         '''Inform the repository that nodes have been destroyed.
2170         Intended for use by strip and rollback, so there's a common
2171         place for anything that has to be done after destroying history.
2172         '''
2173         # When one tries to:
2174         # 1) destroy nodes thus calling this method (e.g. strip)
2175         # 2) use phasecache somewhere (e.g. commit)
2176         #
2177         # then 2) will fail because the phasecache contains nodes that were
2178         # removed. We can either remove phasecache from the filecache,
2179         # causing it to reload next time it is accessed, or simply filter
2180         # the removed nodes now and write the updated cache.
2181         self._phasecache.filterunknown(self)
2182         self._phasecache.write()
2183
2184         # refresh all repository caches
2185         self.updatecaches()
2186
2187         # Ensure the persistent tag cache is updated. Doing it now
2188         # means that the tag cache only has to worry about destroyed
2189         # heads immediately after a strip/rollback. That in turn
2190         # guarantees that "cachetip == currenttip" (comparing both rev
2191         # and node) always means no nodes have been added or destroyed.
2192
2193         # XXX this is suboptimal when qrefresh'ing: we strip the current
2194         # head, refresh the tag cache, then immediately add a new head.
2195         # But I think doing it this way is necessary for the "instant
2196         # tag cache retrieval" case to work.
2197         self.invalidate()
2198
2199     def status(self, node1='.', node2=None, match=None,
2200                ignored=False, clean=False, unknown=False,
2201                listsubrepos=False):
2202         '''a convenience method that calls node1.status(node2)'''
2203         return self[node1].status(node2, match, ignored, clean, unknown,
2204                                   listsubrepos)
2206 def addpostdsstatus(self, ps):
2206 def addpostdsstatus(self, ps):
2207 """Add a callback to run within the wlock, at the point at which status
2207 """Add a callback to run within the wlock, at the point at which status
2208 fixups happen.
2208 fixups happen.
2209
2209
2210 On status completion, callback(wctx, status) will be called with the
2210 On status completion, callback(wctx, status) will be called with the
2211 wlock held, unless the dirstate has changed from underneath or the wlock
2211 wlock held, unless the dirstate has changed from underneath or the wlock
2212 couldn't be grabbed.
2212 couldn't be grabbed.
2213
2213
2214 Callbacks should not capture and use a cached copy of the dirstate --
2214 Callbacks should not capture and use a cached copy of the dirstate --
2215 it might change in the meanwhile. Instead, they should access the
2215 it might change in the meanwhile. Instead, they should access the
2216 dirstate via wctx.repo().dirstate.
2216 dirstate via wctx.repo().dirstate.
2217
2217
2218 This list is emptied out after each status run -- extensions should
2218 This list is emptied out after each status run -- extensions should
2219 make sure it adds to this list each time dirstate.status is called.
2219 make sure it adds to this list each time dirstate.status is called.
2220 Extensions should also make sure they don't call this for statuses
2220 Extensions should also make sure they don't call this for statuses
2221 that don't involve the dirstate.
2221 that don't involve the dirstate.
2222 """
2222 """
2223
2223
2224 # The list is located here for uniqueness reasons -- it is actually
2224 # The list is located here for uniqueness reasons -- it is actually
2225 # managed by the workingctx, but that isn't unique per-repo.
2225 # managed by the workingctx, but that isn't unique per-repo.
2226 self._postdsstatus.append(ps)
2226 self._postdsstatus.append(ps)
2227
2228     def postdsstatus(self):
2229         """Used by workingctx to get the list of post-dirstate-status hooks."""
2230         return self._postdsstatus
2231
2232     def clearpostdsstatus(self):
2233         """Used by workingctx to clear post-dirstate-status hooks."""
2234         del self._postdsstatus[:]
2235
2236     def heads(self, start=None):
2237         if start is None:
2238             cl = self.changelog
2239             headrevs = reversed(cl.headrevs())
2240             return [cl.node(rev) for rev in headrevs]
2241
2242         heads = self.changelog.heads(start)
2243         # sort the output in rev descending order
2244         return sorted(heads, key=self.changelog.rev, reverse=True)
2245
2246     def branchheads(self, branch=None, start=None, closed=False):
2247         '''return a (possibly filtered) list of heads for the given branch
2248
2249         Heads are returned in topological order, from newest to oldest.
2250         If branch is None, use the dirstate branch.
2251         If start is not None, return only heads reachable from start.
2252         If closed is True, return heads that are marked as closed as well.
2253         '''
2254         if branch is None:
2255             branch = self[None].branch()
2256         branches = self.branchmap()
2257         if branch not in branches:
2258             return []
2259         # the cache returns heads ordered lowest to highest
2260         bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2261         if start is not None:
2262             # filter out the heads that cannot be reached from startrev
2263             fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2264             bheads = [h for h in bheads if h in fbheads]
2265         return bheads
2266
2267     def branches(self, nodes):
2268         if not nodes:
2269             nodes = [self.changelog.tip()]
2270         b = []
2271         for n in nodes:
2272             t = n
2273             while True:
2274                 p = self.changelog.parents(n)
2275                 if p[1] != nullid or p[0] == nullid:
2276                     b.append((t, n, p[0], p[1]))
2277                     break
2278                 n = p[0]
2279         return b
2280
2281     def between(self, pairs):
2282         r = []
2283
2284         for top, bottom in pairs:
2285             n, l, i = top, [], 0
2286             f = 1
2287
2288             while n != bottom and n != nullid:
2289                 p = self.changelog.parents(n)[0]
2290                 if i == f:
2291                     l.append(n)
2292                     f = f * 2
2293                 n = p
2294                 i += 1
2295
2296             r.append(l)
2297
2298         return r
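    # Editorial note (not in the original source): for each (top, bottom)
    # pair, the loop above follows first parents from 'top' towards
    # 'bottom' and records only the nodes at exponentially growing
    # distances 1, 2, 4, 8, ... (append when i == f, then double f).
    # Older discovery code uses this sparse sample to locate common
    # ancestors without transferring the whole ancestor chain.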
2299
2300     def checkpush(self, pushop):
2301         """Extensions can override this function if additional checks have
2302         to be performed before pushing, or call it if they override push
2303         command.
2304         """
2305
2306     @unfilteredpropertycache
2307     def prepushoutgoinghooks(self):
2308         """Return a util.hooks object whose hooks are called with a pushop
2309         (carrying repo, remote and outgoing) before changesets are pushed.
2310         """
2311         return util.hooks()
2312
2313     def pushkey(self, namespace, key, old, new):
2314         try:
2315             tr = self.currenttransaction()
2316             hookargs = {}
2317             if tr is not None:
2318                 hookargs.update(tr.hookargs)
2319             hookargs = pycompat.strkwargs(hookargs)
2320             hookargs[r'namespace'] = namespace
2321             hookargs[r'key'] = key
2322             hookargs[r'old'] = old
2323             hookargs[r'new'] = new
2324             self.hook('prepushkey', throw=True, **hookargs)
2325         except error.HookAbort as exc:
2326             self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2327             if exc.hint:
2328                 self.ui.write_err(_("(%s)\n") % exc.hint)
2329             return False
2330         self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2331         ret = pushkey.push(self, namespace, key, old, new)
2332         def runhook():
2333             self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2334                       ret=ret)
2335         self._afterlock(runhook)
2336         return ret
2337
2338     def listkeys(self, namespace):
2339         self.hook('prelistkeys', throw=True, namespace=namespace)
2340         self.ui.debug('listing keys for "%s"\n' % namespace)
2341         values = pushkey.list(self, namespace)
2342         self.hook('listkeys', namespace=namespace, values=values)
2343         return values
2344
2345     def debugwireargs(self, one, two, three=None, four=None, five=None):
2346         '''used to test argument passing over the wire'''
2347         return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2348                                    pycompat.bytestr(four),
2349                                    pycompat.bytestr(five))
2350
2351     def savecommitmessage(self, text):
2352         fp = self.vfs('last-message.txt', 'wb')
2353         try:
2354             fp.write(text)
2355         finally:
2356             fp.close()
2357         return self.pathto(fp.name[len(self.root) + 1:])
2358
2359 # used to avoid circular references so destructors work
2360 def aftertrans(files):
2361     renamefiles = [tuple(t) for t in files]
2362     def a():
2363         for vfs, src, dest in renamefiles:
2364             # if src and dest refer to a same file, vfs.rename is a no-op,
2365             # leaving both src and dest on disk. delete dest to make sure
2366             # the rename couldn't be such a no-op.
2367             vfs.tryunlink(dest)
2368             try:
2369                 vfs.rename(src, dest)
2370             except OSError: # journal file does not yet exist
2371                 pass
2372     return a
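# Editorial sketch (hypothetical values): aftertrans() returns a closure
# that performs the queued renames, e.g. turning the transaction journal
# into the undo file once the transaction is safely closed:
#
#     a = aftertrans([(vfs, 'journal', 'undo')])
#     a()  # removes a stale 'undo', then renames 'journal' -> 'undo'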
2373
2374 def undoname(fn):
2375     base, name = os.path.split(fn)
2376     assert name.startswith('journal')
2377     return os.path.join(base, name.replace('journal', 'undo', 1))
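# For example (illustrative path), undoname('/repo/.hg/store/journal.phaseroots')
# returns '/repo/.hg/store/undo.phaseroots'.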
2378
2379 def instance(ui, path, create, intents=None, createopts=None):
2380     if create:
2381         vfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2382
2383         if vfs.exists('.hg'):
2384             raise error.RepoError(_('repository %s already exists') % path)
2385
2386         createrepository(ui, vfs, createopts=createopts)
2387
2388     return localrepository(ui, util.urllocalpath(path), intents=intents)
2389
2390 def islocal(path):
2391     return True
2392
2393 def newreporequirements(ui, createopts=None):
2394     """Determine the set of requirements for a new local repository.
2395
2396     Extensions can wrap this function to specify custom requirements for
2397     new repositories.
2398     """
2399     createopts = createopts or {}
2400
2401     requirements = {'revlogv1'}
2402     if ui.configbool('format', 'usestore'):
2403         requirements.add('store')
2404         if ui.configbool('format', 'usefncache'):
2405             requirements.add('fncache')
2406             if ui.configbool('format', 'dotencode'):
2407                 requirements.add('dotencode')
2408
2409     compengine = ui.config('experimental', 'format.compression')
2410     if compengine not in util.compengines:
2411         raise error.Abort(_('compression engine %s defined by '
2412                             'experimental.format.compression not available') %
2413                           compengine,
2414                           hint=_('run "hg debuginstall" to list available '
2415                                  'compression engines'))
2416
2417     # zlib is the historical default and doesn't need an explicit requirement.
2418     if compengine != 'zlib':
2419         requirements.add('exp-compression-%s' % compengine)
2420
2421     if scmutil.gdinitconfig(ui):
2422         requirements.add('generaldelta')
2423     if ui.configbool('experimental', 'treemanifest'):
2424         requirements.add('treemanifest')
2425     # experimental config: format.sparse-revlog
2426     if ui.configbool('format', 'sparse-revlog'):
2427         requirements.add(SPARSEREVLOG_REQUIREMENT)
2428
2429     revlogv2 = ui.config('experimental', 'revlogv2')
2430     if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2431         requirements.remove('revlogv1')
2432         # generaldelta is implied by revlogv2.
2433         requirements.discard('generaldelta')
2434         requirements.add(REVLOGV2_REQUIREMENT)
2435     # experimental config: format.internal-phase
2436     if ui.configbool('format', 'internal-phase'):
2437         requirements.add('internal-phase')
2438
2439     if createopts.get('narrowfiles'):
2440         requirements.add(repository.NARROW_REQUIREMENT)
2441
2442     return requirements
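# Editorial note (assumed stock defaults): with format.usestore,
# format.usefncache, format.dotencode and generaldelta all enabled, as they
# are by default in this era, the result is typically
# {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}.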
2443
2444 def filterknowncreateopts(ui, createopts):
2445     """Filters a dict of repo creation options against options that are known.
2446
2447     Receives a dict of repo creation options and returns a dict of those
2448     options that we don't know how to handle.
2449
2450     This function is called as part of repository creation. If the
2451     returned dict contains any items, repository creation will not
2452     be allowed, as it means there was a request to create a repository
2453     with options not recognized by loaded code.
2454
2455     Extensions can wrap this function to filter out creation options
2456     they know how to handle.
2457     """
2458     known = {'narrowfiles'}
2459
2460     return {k: v for k, v in createopts.items() if k not in known}
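# Editorial sketch of the wrapping pattern the docstring describes
# (hypothetical extension; 'myopt' is an invented option name):
#
#     from mercurial import extensions, localrepo
#
#     def _filtercreateopts(orig, ui, createopts):
#         createopts = dict(createopts)
#         createopts.pop('myopt', None)  # this extension handles 'myopt'
#         return orig(ui, createopts)
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                                 _filtercreateopts)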
2461
2462 def createrepository(ui, wdirvfs, createopts=None):
2463     """Create a new repository in a vfs.
2464
2465     ``wdirvfs`` is a vfs instance pointing at the working directory.
2466 -   ``requirements`` is a set of requirements for the new repository.
2466 +   ``createopts`` options for the new repository.
2467     """
2468     createopts = createopts or {}
2469
2470     unknownopts = filterknowncreateopts(ui, createopts)
2471
2472     if not isinstance(unknownopts, dict):
2473         raise error.ProgrammingError('filterknowncreateopts() did not return '
2474                                      'a dict')
2475
2476     if unknownopts:
2477         raise error.Abort(_('unable to create repository because of unknown '
2478                             'creation option: %s') %
2479                           ', '.join(sorted(unknownopts)),
2480                           hint=_('is a required extension not loaded?'))
2481
2482     requirements = newreporequirements(ui, createopts=createopts)
2483
2484     if not wdirvfs.exists():
2485         wdirvfs.makedirs()
2486
2487     hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2488     hgvfs.makedir(notindexed=True)
2489
2490     if b'store' in requirements:
2491         hgvfs.mkdir(b'store')
2492
2493         # We create an invalid changelog outside the store so very old
2494         # Mercurial versions (which didn't know about the requirements
2495         # file) encounter an error on reading the changelog. This
2496         # effectively locks out old clients and prevents them from
2497         # mucking with a repo in an unknown format.
2498         #
2499         # The revlog header has version 2, which won't be recognized by
2500         # such old clients.
2501         hgvfs.append(b'00changelog.i',
2502                      b'\0\0\0\2 dummy changelog to prevent using the old repo '
2503                      b'layout')
2504
2505     scmutil.writerequires(hgvfs, requirements)
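# Editorial sketch (hypothetical path): creating a fresh repository on disk,
# mirroring what instance() does above for the create case:
#
#     wdirvfs = vfsmod.vfs('/tmp/newrepo', expandpath=True, realpath=True)
#     createrepository(ui, wdirvfs)
#     repo = localrepository(ui, '/tmp/newrepo')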