localrepo: use revsymbol in lookupbranch() too...
Martin von Zweigbergk
r37370:92171562 default
@@ -1,2330 +1,2330 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

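# A minimal usage sketch of isfilecached() (illustrative, not part of
# upstream localrepo.py); "repo" is assumed to be an open localrepository
# and 'changelog' is one of the storecache-decorated properties below:
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         pass  # reuse "obj" without forcing a reload from disk
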
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

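# Illustrative sketch of the unfilteredmethod decorator (a hypothetical
# method, not one defined in this file): the wrapper swaps any repoview
# for the unfiltered repository before the decorated body runs.
#
#     class somerepo(localrepository):
#         @unfilteredmethod
#         def countallrevs(self):
#             # "self" is always the unfiltered repository here
#             return len(self.changelog)
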
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

@zi.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes of files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

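    # Illustrative sketch (not part of upstream localrepo.py): peer() wraps
    # the repository in the in-process wire-protocol interface, so local and
    # remote repositories can be driven through the same API.
    #
    #     p = repo.peer()
    #     caps = p.capabilities()   # same answer a remote peer would give
    #     node = p.lookup('tip')    # binary node for a symbolic name
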
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

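    # Illustrative sketch (not part of upstream localrepo.py): repoview
    # filter names such as 'visible' and 'served' choose which revisions a
    # view hides; localpeer.__init__ above uses filtered('served').
    #
    #     served = repo.filtered('served')  # hides secret/hidden changesets
    #     assert served.unfiltered() is repo.unfiltered()
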
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

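    # Illustrative sketch of __getitem__ (not part of upstream localrepo.py):
    # indexing a repo yields context objects for several kinds of changeids.
    #
    #     wctx = repo[None]   # workingctx for the working directory
    #     ctx = repo[0]       # changectx for revision 0
    #     ctxs = repo[0:3]    # list of changectx, filtered revs skipped
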
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

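    # Illustrative sketch of revs() (not part of upstream localrepo.py):
    # the %-placeholders are escaped by revsetlang.formatspec, so callers
    # can splice runtime values into a revset safely.
    #
    #     for rev in repo.revs('ancestors(%d) and branch(%s)', 42, 'default'):
    #         pass  # rev is an integer revision number
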
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

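    # Illustrative sketch of anyrevs() (not part of upstream localrepo.py);
    # the alias name "mine" and user "alice" are hypothetical. The localalias
    # definition overrides any user-configured alias of the same name.
    #
    #     revs = repo.anyrevs(['mine and not public()'], user=True,
    #                         localalias={'mine': 'author(alice)'})
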
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

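    # Illustrative sketch of hook() (not part of upstream localrepo.py):
    # keyword arguments are exposed to shell hooks as HG_* environment
    # variables; the parent values below are placeholders.
    #
    #     repo.hook('precommit', throw=True, parent1='...', parent2='')
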
877 @filteredpropertycache
877 @filteredpropertycache
878 def _tagscache(self):
878 def _tagscache(self):
879 '''Returns a tagscache object that contains various tags related
879 '''Returns a tagscache object that contains various tags related
880 caches.'''
880 caches.'''
881
881
882 # This simplifies its cache management by having one decorated
882 # This simplifies its cache management by having one decorated
883 # function (this one) and the rest simply fetch things from it.
883 # function (this one) and the rest simply fetch things from it.
884 class tagscache(object):
884 class tagscache(object):
885 def __init__(self):
885 def __init__(self):
886 # These two define the set of tags for this repository. tags
886 # These two define the set of tags for this repository. tags
887 # maps tag name to node; tagtypes maps tag name to 'global' or
887 # maps tag name to node; tagtypes maps tag name to 'global' or
888 # 'local'. (Global tags are defined by .hgtags across all
888 # 'local'. (Global tags are defined by .hgtags across all
889 # heads, and local tags are defined in .hg/localtags.)
889 # heads, and local tags are defined in .hg/localtags.)
890 # They constitute the in-memory cache of tags.
890 # They constitute the in-memory cache of tags.
891 self.tags = self.tagtypes = None
891 self.tags = self.tagtypes = None
892
892
893 self.nodetagscache = self.tagslist = None
893 self.nodetagscache = self.tagslist = None
894
894
895 cache = tagscache()
895 cache = tagscache()
896 cache.tags, cache.tagtypes = self._findtags()
896 cache.tags, cache.tagtypes = self._findtags()
897
897
898 return cache
898 return cache
899
899
900 def tags(self):
900 def tags(self):
901 '''return a mapping of tag to node'''
901 '''return a mapping of tag to node'''
902 t = {}
902 t = {}
903 if self.changelog.filteredrevs:
903 if self.changelog.filteredrevs:
904 tags, tt = self._findtags()
904 tags, tt = self._findtags()
905 else:
905 else:
906 tags = self._tagscache.tags
906 tags = self._tagscache.tags
907 for k, v in tags.iteritems():
907 for k, v in tags.iteritems():
908 try:
908 try:
909 # ignore tags to unknown nodes
909 # ignore tags to unknown nodes
910 self.changelog.rev(v)
910 self.changelog.rev(v)
911 t[k] = v
911 t[k] = v
912 except (error.LookupError, ValueError):
912 except (error.LookupError, ValueError):
913 pass
913 pass
914 return t
914 return t
915
915
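# Illustrative use of the tag APIs on this class (a sketch; assumes an
# existing `repo`):
#
#     byname = repo.tags()                    # {tag name: binary node}
#     kind = repo.tagtype('1.0')              # 'global', 'local' or None
#     names = repo.nodetags(byname['tip'])    # tags pointing at a node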
916 def _findtags(self):
916 def _findtags(self):
917 '''Do the hard work of finding tags. Return a pair of dicts
917 '''Do the hard work of finding tags. Return a pair of dicts
918 (tags, tagtypes) where tags maps tag name to node, and tagtypes
918 (tags, tagtypes) where tags maps tag name to node, and tagtypes
919 maps tag name to a string like \'global\' or \'local\'.
919 maps tag name to a string like \'global\' or \'local\'.
920 Subclasses or extensions are free to add their own tags, but
920 Subclasses or extensions are free to add their own tags, but
921 should be aware that the returned dicts will be retained for the
921 should be aware that the returned dicts will be retained for the
922 duration of the localrepo object.'''
922 duration of the localrepo object.'''
923
923
924 # XXX what tagtype should subclasses/extensions use? Currently
924 # XXX what tagtype should subclasses/extensions use? Currently
925 # mq and bookmarks add tags, but do not set the tagtype at all.
925 # mq and bookmarks add tags, but do not set the tagtype at all.
926 # Should each extension invent its own tag type? Should there
926 # Should each extension invent its own tag type? Should there
927 # be one tagtype for all such "virtual" tags? Or is the status
927 # be one tagtype for all such "virtual" tags? Or is the status
928 # quo fine?
928 # quo fine?
929
929
930
930
931 # map tag name to (node, hist)
931 # map tag name to (node, hist)
932 alltags = tagsmod.findglobaltags(self.ui, self)
932 alltags = tagsmod.findglobaltags(self.ui, self)
933 # map tag name to tag type
933 # map tag name to tag type
934 tagtypes = dict((tag, 'global') for tag in alltags)
934 tagtypes = dict((tag, 'global') for tag in alltags)
935
935
936 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
936 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
937
937
938 # Build the return dicts. Have to re-encode tag names because
938 # Build the return dicts. Have to re-encode tag names because
939 # the tags module always uses UTF-8 (in order not to lose info
939 # the tags module always uses UTF-8 (in order not to lose info
940 # writing to the cache), but the rest of Mercurial wants them in
940 # writing to the cache), but the rest of Mercurial wants them in
941 # local encoding.
941 # local encoding.
942 tags = {}
942 tags = {}
943 for (name, (node, hist)) in alltags.iteritems():
943 for (name, (node, hist)) in alltags.iteritems():
944 if node != nullid:
944 if node != nullid:
945 tags[encoding.tolocal(name)] = node
945 tags[encoding.tolocal(name)] = node
946 tags['tip'] = self.changelog.tip()
946 tags['tip'] = self.changelog.tip()
947 tagtypes = dict([(encoding.tolocal(name), value)
947 tagtypes = dict([(encoding.tolocal(name), value)
948 for (name, value) in tagtypes.iteritems()])
948 for (name, value) in tagtypes.iteritems()])
949 return (tags, tagtypes)
949 return (tags, tagtypes)
950
950
951 def tagtype(self, tagname):
951 def tagtype(self, tagname):
952 '''
952 '''
953 return the type of the given tag. result can be:
953 return the type of the given tag. result can be:
954
954
955 'local' : a local tag
955 'local' : a local tag
956 'global' : a global tag
956 'global' : a global tag
957 None : tag does not exist
957 None : tag does not exist
958 '''
958 '''
959
959
960 return self._tagscache.tagtypes.get(tagname)
960 return self._tagscache.tagtypes.get(tagname)
961
961
962 def tagslist(self):
962 def tagslist(self):
963 '''return a list of tags ordered by revision'''
963 '''return a list of tags ordered by revision'''
964 if not self._tagscache.tagslist:
964 if not self._tagscache.tagslist:
965 l = []
965 l = []
966 for t, n in self.tags().iteritems():
966 for t, n in self.tags().iteritems():
967 l.append((self.changelog.rev(n), t, n))
967 l.append((self.changelog.rev(n), t, n))
968 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
968 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
969
969
970 return self._tagscache.tagslist
970 return self._tagscache.tagslist
971
971
972 def nodetags(self, node):
972 def nodetags(self, node):
973 '''return the tags associated with a node'''
973 '''return the tags associated with a node'''
974 if not self._tagscache.nodetagscache:
974 if not self._tagscache.nodetagscache:
975 nodetagscache = {}
975 nodetagscache = {}
976 for t, n in self._tagscache.tags.iteritems():
976 for t, n in self._tagscache.tags.iteritems():
977 nodetagscache.setdefault(n, []).append(t)
977 nodetagscache.setdefault(n, []).append(t)
978 for tags in nodetagscache.itervalues():
978 for tags in nodetagscache.itervalues():
979 tags.sort()
979 tags.sort()
980 self._tagscache.nodetagscache = nodetagscache
980 self._tagscache.nodetagscache = nodetagscache
981 return self._tagscache.nodetagscache.get(node, [])
981 return self._tagscache.nodetagscache.get(node, [])
982
982
983 def nodebookmarks(self, node):
983 def nodebookmarks(self, node):
984 """return the list of bookmarks pointing to the specified node"""
984 """return the list of bookmarks pointing to the specified node"""
985 marks = []
985 marks = []
986 for bookmark, n in self._bookmarks.iteritems():
986 for bookmark, n in self._bookmarks.iteritems():
987 if n == node:
987 if n == node:
988 marks.append(bookmark)
988 marks.append(bookmark)
989 return sorted(marks)
989 return sorted(marks)
990
990
991 def branchmap(self):
991 def branchmap(self):
992 '''returns a dictionary {branch: [branchheads]} with branchheads
992 '''returns a dictionary {branch: [branchheads]} with branchheads
993 ordered by increasing revision number'''
993 ordered by increasing revision number'''
994 branchmap.updatecache(self)
994 branchmap.updatecache(self)
995 return self._branchcaches[self.filtername]
995 return self._branchcaches[self.filtername]
996
996
997 @unfilteredmethod
997 @unfilteredmethod
998 def revbranchcache(self):
998 def revbranchcache(self):
999 if not self._revbranchcache:
999 if not self._revbranchcache:
1000 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1000 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1001 return self._revbranchcache
1001 return self._revbranchcache
1002
1002
1003 def branchtip(self, branch, ignoremissing=False):
1003 def branchtip(self, branch, ignoremissing=False):
1004 '''return the tip node for a given branch
1004 '''return the tip node for a given branch
1005
1005
1006 If ignoremissing is True, then this method will not raise an error.
1006 If ignoremissing is True, then this method will not raise an error.
1007 This is helpful for callers that only expect None for a missing branch
1007 This is helpful for callers that only expect None for a missing branch
1008 (e.g. namespace).
1008 (e.g. namespace).
1009
1009
1010 '''
1010 '''
1011 try:
1011 try:
1012 return self.branchmap().branchtip(branch)
1012 return self.branchmap().branchtip(branch)
1013 except KeyError:
1013 except KeyError:
1014 if not ignoremissing:
1014 if not ignoremissing:
1015 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1015 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1016 else:
1016 else:
1017 pass
1017 pass
1018
1018
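# Sketch of querying branch information through the methods above
# (assumes an existing `repo`):
#
#     bm = repo.branchmap()
#     if 'default' in bm:
#         node = repo.branchtip('default')
#     maybenode = repo.branchtip('stable', ignoremissing=True)  # None if absent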
1019 def lookup(self, key):
1019 def lookup(self, key):
1020 return scmutil.revsymbol(self, key).node()
1020 return scmutil.revsymbol(self, key).node()
1021
1021
1022 def lookupbranch(self, key):
1022 def lookupbranch(self, key):
1023 if key in self.branchmap():
1023 if key in self.branchmap():
1024 return key
1024 return key
1025
1025
1026 return self[key].branch()
1026 return scmutil.revsymbol(self, key).branch()
1027
1027
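# Sketch of the two lookup helpers above; both now resolve symbols through
# scmutil.revsymbol() (assumes an existing `repo`):
#
#     binnode = repo.lookup('tip')        # 20-byte binary node
#     repo.lookupbranch('default')        # 'default' if that branch exists
#     repo.lookupbranch('tip')            # otherwise, the revision's branch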
1028 def known(self, nodes):
1028 def known(self, nodes):
1029 cl = self.changelog
1029 cl = self.changelog
1030 nm = cl.nodemap
1030 nm = cl.nodemap
1031 filtered = cl.filteredrevs
1031 filtered = cl.filteredrevs
1032 result = []
1032 result = []
1033 for n in nodes:
1033 for n in nodes:
1034 r = nm.get(n)
1034 r = nm.get(n)
1035 resp = not (r is None or r in filtered)
1035 resp = not (r is None or r in filtered)
1036 result.append(resp)
1036 result.append(resp)
1037 return result
1037 return result
1038
1038
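# Sketch: known() maps each queried node to whether it is present and not
# filtered ('unknownnode' is a hypothetical node absent from the repo):
#
#     flags = repo.known([repo.lookup('tip'), unknownnode])
#     # -> [True, False]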
1039 def local(self):
1039 def local(self):
1040 return self
1040 return self
1041
1041
1042 def publishing(self):
1042 def publishing(self):
1043 # it's safe (and desirable) to trust the publish flag unconditionally
1043 # it's safe (and desirable) to trust the publish flag unconditionally
1044 # so that we don't finalize changes shared between users via ssh or nfs
1044 # so that we don't finalize changes shared between users via ssh or nfs
1045 return self.ui.configbool('phases', 'publish', untrusted=True)
1045 return self.ui.configbool('phases', 'publish', untrusted=True)
1046
1046
1047 def cancopy(self):
1047 def cancopy(self):
1048 # so statichttprepo's override of local() works
1048 # so statichttprepo's override of local() works
1049 if not self.local():
1049 if not self.local():
1050 return False
1050 return False
1051 if not self.publishing():
1051 if not self.publishing():
1052 return True
1052 return True
1053 # if publishing we can't copy if there is filtered content
1053 # if publishing we can't copy if there is filtered content
1054 return not self.filtered('visible').changelog.filteredrevs
1054 return not self.filtered('visible').changelog.filteredrevs
1055
1055
1056 def shared(self):
1056 def shared(self):
1057 '''the type of shared repository (None if not shared)'''
1057 '''the type of shared repository (None if not shared)'''
1058 if self.sharedpath != self.path:
1058 if self.sharedpath != self.path:
1059 return 'store'
1059 return 'store'
1060 return None
1060 return None
1061
1061
1062 def wjoin(self, f, *insidef):
1062 def wjoin(self, f, *insidef):
1063 return self.vfs.reljoin(self.root, f, *insidef)
1063 return self.vfs.reljoin(self.root, f, *insidef)
1064
1064
1065 def file(self, f):
1065 def file(self, f):
1066 if f[0] == '/':
1066 if f[0] == '/':
1067 f = f[1:]
1067 f = f[1:]
1068 return filelog.filelog(self.svfs, f)
1068 return filelog.filelog(self.svfs, f)
1069
1069
1070 def setparents(self, p1, p2=nullid):
1070 def setparents(self, p1, p2=nullid):
1071 with self.dirstate.parentchange():
1071 with self.dirstate.parentchange():
1072 copies = self.dirstate.setparents(p1, p2)
1072 copies = self.dirstate.setparents(p1, p2)
1073 pctx = self[p1]
1073 pctx = self[p1]
1074 if copies:
1074 if copies:
1075 # Adjust copy records; the dirstate cannot do it, as it
1075 # Adjust copy records; the dirstate cannot do it, as it
1076 # requires access to the parents' manifests. Preserve them
1076 # requires access to the parents' manifests. Preserve them
1077 # only for entries added to the first parent.
1077 # only for entries added to the first parent.
1078 for f in copies:
1078 for f in copies:
1079 if f not in pctx and copies[f] in pctx:
1079 if f not in pctx and copies[f] in pctx:
1080 self.dirstate.copy(copies[f], f)
1080 self.dirstate.copy(copies[f], f)
1081 if p2 == nullid:
1081 if p2 == nullid:
1082 for f, s in sorted(self.dirstate.copies().items()):
1082 for f, s in sorted(self.dirstate.copies().items()):
1083 if f not in pctx and s not in pctx:
1083 if f not in pctx and s not in pctx:
1084 self.dirstate.copy(None, f)
1084 self.dirstate.copy(None, f)
1085
1085
1086 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1086 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1087 """changeid can be a changeset revision, node, or tag.
1087 """changeid can be a changeset revision, node, or tag.
1088 fileid can be a file revision or node."""
1088 fileid can be a file revision or node."""
1089 return context.filectx(self, path, changeid, fileid,
1089 return context.filectx(self, path, changeid, fileid,
1090 changectx=changectx)
1090 changectx=changectx)
1091
1091
1092 def getcwd(self):
1092 def getcwd(self):
1093 return self.dirstate.getcwd()
1093 return self.dirstate.getcwd()
1094
1094
1095 def pathto(self, f, cwd=None):
1095 def pathto(self, f, cwd=None):
1096 return self.dirstate.pathto(f, cwd)
1096 return self.dirstate.pathto(f, cwd)
1097
1097
1098 def _loadfilter(self, filter):
1098 def _loadfilter(self, filter):
1099 if filter not in self._filterpats:
1099 if filter not in self._filterpats:
1100 l = []
1100 l = []
1101 for pat, cmd in self.ui.configitems(filter):
1101 for pat, cmd in self.ui.configitems(filter):
1102 if cmd == '!':
1102 if cmd == '!':
1103 continue
1103 continue
1104 mf = matchmod.match(self.root, '', [pat])
1104 mf = matchmod.match(self.root, '', [pat])
1105 fn = None
1105 fn = None
1106 params = cmd
1106 params = cmd
1107 for name, filterfn in self._datafilters.iteritems():
1107 for name, filterfn in self._datafilters.iteritems():
1108 if cmd.startswith(name):
1108 if cmd.startswith(name):
1109 fn = filterfn
1109 fn = filterfn
1110 params = cmd[len(name):].lstrip()
1110 params = cmd[len(name):].lstrip()
1111 break
1111 break
1112 if not fn:
1112 if not fn:
1113 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1113 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1114 # Wrap old filters not supporting keyword arguments
1114 # Wrap old filters not supporting keyword arguments
1115 if not pycompat.getargspec(fn)[2]:
1115 if not pycompat.getargspec(fn)[2]:
1116 oldfn = fn
1116 oldfn = fn
1117 fn = lambda s, c, **kwargs: oldfn(s, c)
1117 fn = lambda s, c, **kwargs: oldfn(s, c)
1118 l.append((mf, fn, params))
1118 l.append((mf, fn, params))
1119 self._filterpats[filter] = l
1119 self._filterpats[filter] = l
1120 return self._filterpats[filter]
1120 return self._filterpats[filter]
1121
1121
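# The patterns and commands iterated above come from the matching hgrc
# section; an illustrative configuration (see 'hg help config' for the
# authoritative syntax; a command of '!' disables filtering for a pattern):
#
#     [encode]
#     *.gz = pipe: gunzip
#     [decode]
#     *.gz = pipe: gzip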
1122 def _filter(self, filterpats, filename, data):
1122 def _filter(self, filterpats, filename, data):
1123 for mf, fn, cmd in filterpats:
1123 for mf, fn, cmd in filterpats:
1124 if mf(filename):
1124 if mf(filename):
1125 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1125 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1126 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1126 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1127 break
1127 break
1128
1128
1129 return data
1129 return data
1130
1130
1131 @unfilteredpropertycache
1131 @unfilteredpropertycache
1132 def _encodefilterpats(self):
1132 def _encodefilterpats(self):
1133 return self._loadfilter('encode')
1133 return self._loadfilter('encode')
1134
1134
1135 @unfilteredpropertycache
1135 @unfilteredpropertycache
1136 def _decodefilterpats(self):
1136 def _decodefilterpats(self):
1137 return self._loadfilter('decode')
1137 return self._loadfilter('decode')
1138
1138
1139 def adddatafilter(self, name, filter):
1139 def adddatafilter(self, name, filter):
1140 self._datafilters[name] = filter
1140 self._datafilters[name] = filter
1141
1141
1142 def wread(self, filename):
1142 def wread(self, filename):
1143 if self.wvfs.islink(filename):
1143 if self.wvfs.islink(filename):
1144 data = self.wvfs.readlink(filename)
1144 data = self.wvfs.readlink(filename)
1145 else:
1145 else:
1146 data = self.wvfs.read(filename)
1146 data = self.wvfs.read(filename)
1147 return self._filter(self._encodefilterpats, filename, data)
1147 return self._filter(self._encodefilterpats, filename, data)
1148
1148
1149 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1149 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1150 """write ``data`` into ``filename`` in the working directory
1150 """write ``data`` into ``filename`` in the working directory
1151
1151
1152 This returns the length of the written (maybe decoded) data.
1152 This returns the length of the written (maybe decoded) data.
1153 """
1153 """
1154 data = self._filter(self._decodefilterpats, filename, data)
1154 data = self._filter(self._decodefilterpats, filename, data)
1155 if 'l' in flags:
1155 if 'l' in flags:
1156 self.wvfs.symlink(data, filename)
1156 self.wvfs.symlink(data, filename)
1157 else:
1157 else:
1158 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1158 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1159 **kwargs)
1159 **kwargs)
1160 if 'x' in flags:
1160 if 'x' in flags:
1161 self.wvfs.setflags(filename, False, True)
1161 self.wvfs.setflags(filename, False, True)
1162 else:
1162 else:
1163 self.wvfs.setflags(filename, False, False)
1163 self.wvfs.setflags(filename, False, False)
1164 return len(data)
1164 return len(data)
1165
1165
1166 def wwritedata(self, filename, data):
1166 def wwritedata(self, filename, data):
1167 return self._filter(self._decodefilterpats, filename, data)
1167 return self._filter(self._decodefilterpats, filename, data)
1168
1168
1169 def currenttransaction(self):
1169 def currenttransaction(self):
1170 """return the current transaction or None if non exists"""
1170 """return the current transaction or None if non exists"""
1171 if self._transref:
1171 if self._transref:
1172 tr = self._transref()
1172 tr = self._transref()
1173 else:
1173 else:
1174 tr = None
1174 tr = None
1175
1175
1176 if tr and tr.running():
1176 if tr and tr.running():
1177 return tr
1177 return tr
1178 return None
1178 return None
1179
1179
1180 def transaction(self, desc, report=None):
1180 def transaction(self, desc, report=None):
1181 if (self.ui.configbool('devel', 'all-warnings')
1181 if (self.ui.configbool('devel', 'all-warnings')
1182 or self.ui.configbool('devel', 'check-locks')):
1182 or self.ui.configbool('devel', 'check-locks')):
1183 if self._currentlock(self._lockref) is None:
1183 if self._currentlock(self._lockref) is None:
1184 raise error.ProgrammingError('transaction requires locking')
1184 raise error.ProgrammingError('transaction requires locking')
1185 tr = self.currenttransaction()
1185 tr = self.currenttransaction()
1186 if tr is not None:
1186 if tr is not None:
1187 return tr.nest(name=desc)
1187 return tr.nest(name=desc)
1188
1188
1189 # abort here if the journal already exists
1189 # abort here if the journal already exists
1190 if self.svfs.exists("journal"):
1190 if self.svfs.exists("journal"):
1191 raise error.RepoError(
1191 raise error.RepoError(
1192 _("abandoned transaction found"),
1192 _("abandoned transaction found"),
1193 hint=_("run 'hg recover' to clean up transaction"))
1193 hint=_("run 'hg recover' to clean up transaction"))
1194
1194
1195 idbase = "%.40f#%f" % (random.random(), time.time())
1195 idbase = "%.40f#%f" % (random.random(), time.time())
1196 ha = hex(hashlib.sha1(idbase).digest())
1196 ha = hex(hashlib.sha1(idbase).digest())
1197 txnid = 'TXN:' + ha
1197 txnid = 'TXN:' + ha
1198 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1198 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1199
1199
1200 self._writejournal(desc)
1200 self._writejournal(desc)
1201 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1201 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1202 if report:
1202 if report:
1203 rp = report
1203 rp = report
1204 else:
1204 else:
1205 rp = self.ui.warn
1205 rp = self.ui.warn
1206 vfsmap = {'plain': self.vfs} # root of .hg/
1206 vfsmap = {'plain': self.vfs} # root of .hg/
1207 # we must avoid cyclic reference between repo and transaction.
1207 # we must avoid cyclic reference between repo and transaction.
1208 reporef = weakref.ref(self)
1208 reporef = weakref.ref(self)
1209 # Code to track tag movement
1209 # Code to track tag movement
1210 #
1210 #
1211 # Since tags are all handled as file content, it is actually quite hard
1211 # Since tags are all handled as file content, it is actually quite hard
1212 # to track these movements from a code perspective. So we fall back to
1212 # to track these movements from a code perspective. So we fall back to
1213 # tracking at the repository level. One could envision tracking changes
1213 # tracking at the repository level. One could envision tracking changes
1214 # to the '.hgtags' file through changegroup application, but that fails
1214 # to the '.hgtags' file through changegroup application, but that fails
1215 # to cope with cases where a transaction exposes new heads without a
1215 # to cope with cases where a transaction exposes new heads without a
1216 # changegroup being involved (eg: phase movement).
1216 # changegroup being involved (eg: phase movement).
1217 #
1217 #
1218 # For now, we gate the feature behind a flag since this likely comes
1218 # For now, we gate the feature behind a flag since this likely comes
1219 # with performance impacts. The current code runs more often than needed
1219 # with performance impacts. The current code runs more often than needed
1220 # and does not use caches as much as it could. The current focus is on
1220 # and does not use caches as much as it could. The current focus is on
1221 # the behavior of the feature so we disable it by default. The flag
1221 # the behavior of the feature so we disable it by default. The flag
1222 # will be removed when we are happy with the performance impact.
1222 # will be removed when we are happy with the performance impact.
1223 #
1223 #
1224 # Once this feature is no longer experimental move the following
1224 # Once this feature is no longer experimental move the following
1225 # documentation to the appropriate help section:
1225 # documentation to the appropriate help section:
1226 #
1226 #
1227 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1227 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1228 # tags (new or changed or deleted tags). In addition the details of
1228 # tags (new or changed or deleted tags). In addition the details of
1229 # these changes are made available in a file at:
1229 # these changes are made available in a file at:
1230 # ``REPOROOT/.hg/changes/tags.changes``.
1230 # ``REPOROOT/.hg/changes/tags.changes``.
1231 # Make sure you check for HG_TAG_MOVED before reading that file as it
1231 # Make sure you check for HG_TAG_MOVED before reading that file as it
1232 # might exist from a previous transaction even if no tags were touched
1232 # might exist from a previous transaction even if no tags were touched
1233 # in this one. Changes are recorded in a line-based format::
1233 # in this one. Changes are recorded in a line-based format::
1234 #
1234 #
1235 # <action> <hex-node> <tag-name>\n
1235 # <action> <hex-node> <tag-name>\n
1236 #
1236 #
1237 # Actions are defined as follows:
1237 # Actions are defined as follows:
1238 # "-R": tag is removed,
1238 # "-R": tag is removed,
1239 # "+A": tag is added,
1239 # "+A": tag is added,
1240 # "-M": tag is moved (old value),
1240 # "-M": tag is moved (old value),
1241 # "+M": tag is moved (new value),
1241 # "+M": tag is moved (new value),
1242 tracktags = lambda x: None
1242 tracktags = lambda x: None
1243 # experimental config: experimental.hook-track-tags
1243 # experimental config: experimental.hook-track-tags
1244 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1244 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1245 if desc != 'strip' and shouldtracktags:
1245 if desc != 'strip' and shouldtracktags:
1246 oldheads = self.changelog.headrevs()
1246 oldheads = self.changelog.headrevs()
1247 def tracktags(tr2):
1247 def tracktags(tr2):
1248 repo = reporef()
1248 repo = reporef()
1249 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1249 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1250 newheads = repo.changelog.headrevs()
1250 newheads = repo.changelog.headrevs()
1251 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1251 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1252 # note: we compare lists here.
1252 # note: we compare lists here.
1253 # As we do it only once, building sets would not be cheaper
1253 # As we do it only once, building sets would not be cheaper
1254 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1254 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1255 if changes:
1255 if changes:
1256 tr2.hookargs['tag_moved'] = '1'
1256 tr2.hookargs['tag_moved'] = '1'
1257 with repo.vfs('changes/tags.changes', 'w',
1257 with repo.vfs('changes/tags.changes', 'w',
1258 atomictemp=True) as changesfile:
1258 atomictemp=True) as changesfile:
1259 # note: we do not register the file to the transaction
1259 # note: we do not register the file to the transaction
1260 # because we need it to still exist when the transaction
1260 # because we need it to still exist when the transaction
1261 # is closed (for txnclose hooks)
1261 # is closed (for txnclose hooks)
1262 tagsmod.writediff(changesfile, changes)
1262 tagsmod.writediff(changesfile, changes)
1263 def validate(tr2):
1263 def validate(tr2):
1264 """will run pre-closing hooks"""
1264 """will run pre-closing hooks"""
1265 # XXX the transaction API is a bit lacking here so we take a hacky
1265 # XXX the transaction API is a bit lacking here so we take a hacky
1266 # path for now
1266 # path for now
1267 #
1267 #
1268 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1268 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1269 # dict is copied before these run. In addition we need the data
1269 # dict is copied before these run. In addition we need the data
1270 # available to in-memory hooks too.
1270 # available to in-memory hooks too.
1271 #
1271 #
1272 # Moreover, we also need to make sure this runs before txnclose
1272 # Moreover, we also need to make sure this runs before txnclose
1273 # hooks and there is no "pending" mechanism that would execute
1273 # hooks and there is no "pending" mechanism that would execute
1274 # logic only if hooks are about to run.
1274 # logic only if hooks are about to run.
1275 #
1275 #
1276 # Fixing this limitation of the transaction is also needed to track
1276 # Fixing this limitation of the transaction is also needed to track
1277 # other families of changes (bookmarks, phases, obsolescence).
1277 # other families of changes (bookmarks, phases, obsolescence).
1278 #
1278 #
1279 # This will have to be fixed before we remove the experimental
1279 # This will have to be fixed before we remove the experimental
1280 # gating.
1280 # gating.
1281 tracktags(tr2)
1281 tracktags(tr2)
1282 repo = reporef()
1282 repo = reporef()
1283 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1283 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1284 scmutil.enforcesinglehead(repo, tr2, desc)
1284 scmutil.enforcesinglehead(repo, tr2, desc)
1285 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1285 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1286 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1286 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1287 args = tr.hookargs.copy()
1287 args = tr.hookargs.copy()
1288 args.update(bookmarks.preparehookargs(name, old, new))
1288 args.update(bookmarks.preparehookargs(name, old, new))
1289 repo.hook('pretxnclose-bookmark', throw=True,
1289 repo.hook('pretxnclose-bookmark', throw=True,
1290 txnname=desc,
1290 txnname=desc,
1291 **pycompat.strkwargs(args))
1291 **pycompat.strkwargs(args))
1292 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1292 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1293 cl = repo.unfiltered().changelog
1293 cl = repo.unfiltered().changelog
1294 for rev, (old, new) in tr.changes['phases'].items():
1294 for rev, (old, new) in tr.changes['phases'].items():
1295 args = tr.hookargs.copy()
1295 args = tr.hookargs.copy()
1296 node = hex(cl.node(rev))
1296 node = hex(cl.node(rev))
1297 args.update(phases.preparehookargs(node, old, new))
1297 args.update(phases.preparehookargs(node, old, new))
1298 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1298 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1299 **pycompat.strkwargs(args))
1299 **pycompat.strkwargs(args))
1300
1300
1301 repo.hook('pretxnclose', throw=True,
1301 repo.hook('pretxnclose', throw=True,
1302 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1302 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1303 def releasefn(tr, success):
1303 def releasefn(tr, success):
1304 repo = reporef()
1304 repo = reporef()
1305 if success:
1305 if success:
1306 # this should be explicitly invoked here, because
1306 # this should be explicitly invoked here, because
1307 # in-memory changes aren't written out when closing the
1307 # in-memory changes aren't written out when closing the
1308 # transaction, if tr.addfilegenerator (via
1308 # transaction, if tr.addfilegenerator (via
1309 # dirstate.write or so) isn't invoked while the
1309 # dirstate.write or so) isn't invoked while the
1310 # transaction is running
1310 # transaction is running
1311 repo.dirstate.write(None)
1311 repo.dirstate.write(None)
1312 else:
1312 else:
1313 # discard all changes (including ones already written
1313 # discard all changes (including ones already written
1314 # out) in this transaction
1314 # out) in this transaction
1315 repo.dirstate.restorebackup(None, 'journal.dirstate')
1315 repo.dirstate.restorebackup(None, 'journal.dirstate')
1316
1316
1317 repo.invalidate(clearfilecache=True)
1317 repo.invalidate(clearfilecache=True)
1318
1318
1319 tr = transaction.transaction(rp, self.svfs, vfsmap,
1319 tr = transaction.transaction(rp, self.svfs, vfsmap,
1320 "journal",
1320 "journal",
1321 "undo",
1321 "undo",
1322 aftertrans(renames),
1322 aftertrans(renames),
1323 self.store.createmode,
1323 self.store.createmode,
1324 validator=validate,
1324 validator=validate,
1325 releasefn=releasefn,
1325 releasefn=releasefn,
1326 checkambigfiles=_cachedfiles,
1326 checkambigfiles=_cachedfiles,
1327 name=desc)
1327 name=desc)
1328 tr.changes['revs'] = xrange(0, 0)
1328 tr.changes['revs'] = xrange(0, 0)
1329 tr.changes['obsmarkers'] = set()
1329 tr.changes['obsmarkers'] = set()
1330 tr.changes['phases'] = {}
1330 tr.changes['phases'] = {}
1331 tr.changes['bookmarks'] = {}
1331 tr.changes['bookmarks'] = {}
1332
1332
1333 tr.hookargs['txnid'] = txnid
1333 tr.hookargs['txnid'] = txnid
1334 # note: writing the fncache only during finalize means that the file is
1334 # note: writing the fncache only during finalize means that the file is
1335 # outdated when running hooks. As fncache is used for streaming clone,
1335 # outdated when running hooks. As fncache is used for streaming clone,
1336 # this is not expected to break anything that happens during the hooks.
1336 # this is not expected to break anything that happens during the hooks.
1337 tr.addfinalize('flush-fncache', self.store.write)
1337 tr.addfinalize('flush-fncache', self.store.write)
1338 def txnclosehook(tr2):
1338 def txnclosehook(tr2):
1339 """To be run if transaction is successful, will schedule a hook run
1339 """To be run if transaction is successful, will schedule a hook run
1340 """
1340 """
1341 # Don't reference tr2 in hook() so we don't hold a reference.
1341 # Don't reference tr2 in hook() so we don't hold a reference.
1342 # This reduces memory consumption when there are multiple
1342 # This reduces memory consumption when there are multiple
1343 # transactions per lock. This can likely go away if issue5045
1343 # transactions per lock. This can likely go away if issue5045
1344 # fixes the function accumulation.
1344 # fixes the function accumulation.
1345 hookargs = tr2.hookargs
1345 hookargs = tr2.hookargs
1346
1346
1347 def hookfunc():
1347 def hookfunc():
1348 repo = reporef()
1348 repo = reporef()
1349 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1349 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1350 bmchanges = sorted(tr.changes['bookmarks'].items())
1350 bmchanges = sorted(tr.changes['bookmarks'].items())
1351 for name, (old, new) in bmchanges:
1351 for name, (old, new) in bmchanges:
1352 args = tr.hookargs.copy()
1352 args = tr.hookargs.copy()
1353 args.update(bookmarks.preparehookargs(name, old, new))
1353 args.update(bookmarks.preparehookargs(name, old, new))
1354 repo.hook('txnclose-bookmark', throw=False,
1354 repo.hook('txnclose-bookmark', throw=False,
1355 txnname=desc, **pycompat.strkwargs(args))
1355 txnname=desc, **pycompat.strkwargs(args))
1356
1356
1357 if hook.hashook(repo.ui, 'txnclose-phase'):
1357 if hook.hashook(repo.ui, 'txnclose-phase'):
1358 cl = repo.unfiltered().changelog
1358 cl = repo.unfiltered().changelog
1359 phasemv = sorted(tr.changes['phases'].items())
1359 phasemv = sorted(tr.changes['phases'].items())
1360 for rev, (old, new) in phasemv:
1360 for rev, (old, new) in phasemv:
1361 args = tr.hookargs.copy()
1361 args = tr.hookargs.copy()
1362 node = hex(cl.node(rev))
1362 node = hex(cl.node(rev))
1363 args.update(phases.preparehookargs(node, old, new))
1363 args.update(phases.preparehookargs(node, old, new))
1364 repo.hook('txnclose-phase', throw=False, txnname=desc,
1364 repo.hook('txnclose-phase', throw=False, txnname=desc,
1365 **pycompat.strkwargs(args))
1365 **pycompat.strkwargs(args))
1366
1366
1367 repo.hook('txnclose', throw=False, txnname=desc,
1367 repo.hook('txnclose', throw=False, txnname=desc,
1368 **pycompat.strkwargs(hookargs))
1368 **pycompat.strkwargs(hookargs))
1369 reporef()._afterlock(hookfunc)
1369 reporef()._afterlock(hookfunc)
1370 tr.addfinalize('txnclose-hook', txnclosehook)
1370 tr.addfinalize('txnclose-hook', txnclosehook)
1371 # Include a leading "-" to make it happen before the transaction summary
1371 # Include a leading "-" to make it happen before the transaction summary
1372 # reports registered via scmutil.registersummarycallback() whose names
1372 # reports registered via scmutil.registersummarycallback() whose names
1373 # are 00-txnreport etc. That way, the caches will be warm when the
1373 # are 00-txnreport etc. That way, the caches will be warm when the
1374 # callbacks run.
1374 # callbacks run.
1375 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1375 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1376 def txnaborthook(tr2):
1376 def txnaborthook(tr2):
1377 """To be run if transaction is aborted
1377 """To be run if transaction is aborted
1378 """
1378 """
1379 reporef().hook('txnabort', throw=False, txnname=desc,
1379 reporef().hook('txnabort', throw=False, txnname=desc,
1380 **pycompat.strkwargs(tr2.hookargs))
1380 **pycompat.strkwargs(tr2.hookargs))
1381 tr.addabort('txnabort-hook', txnaborthook)
1381 tr.addabort('txnabort-hook', txnaborthook)
1382 # avoid eager cache invalidation. in-memory data should be identical
1382 # avoid eager cache invalidation. in-memory data should be identical
1383 # to stored data if transaction has no error.
1383 # to stored data if transaction has no error.
1384 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1384 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1385 self._transref = weakref.ref(tr)
1385 self._transref = weakref.ref(tr)
1386 scmutil.registersummarycallback(self, tr, desc)
1386 scmutil.registersummarycallback(self, tr, desc)
1387 return tr
1387 return tr
1388
1388
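# Typical caller-side pattern for the machinery above (a sketch; the store
# lock must be held, and both locks and transactions are usable as context
# managers in recent Mercurial versions):
#
#     with repo.wlock(), repo.lock():
#         with repo.transaction('my-operation') as tr:
#             pass  # write to the store; an exception aborts the transaction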
1389 def _journalfiles(self):
1389 def _journalfiles(self):
1390 return ((self.svfs, 'journal'),
1390 return ((self.svfs, 'journal'),
1391 (self.vfs, 'journal.dirstate'),
1391 (self.vfs, 'journal.dirstate'),
1392 (self.vfs, 'journal.branch'),
1392 (self.vfs, 'journal.branch'),
1393 (self.vfs, 'journal.desc'),
1393 (self.vfs, 'journal.desc'),
1394 (self.vfs, 'journal.bookmarks'),
1394 (self.vfs, 'journal.bookmarks'),
1395 (self.svfs, 'journal.phaseroots'))
1395 (self.svfs, 'journal.phaseroots'))
1396
1396
1397 def undofiles(self):
1397 def undofiles(self):
1398 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1398 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1399
1399
1400 @unfilteredmethod
1400 @unfilteredmethod
1401 def _writejournal(self, desc):
1401 def _writejournal(self, desc):
1402 self.dirstate.savebackup(None, 'journal.dirstate')
1402 self.dirstate.savebackup(None, 'journal.dirstate')
1403 self.vfs.write("journal.branch",
1403 self.vfs.write("journal.branch",
1404 encoding.fromlocal(self.dirstate.branch()))
1404 encoding.fromlocal(self.dirstate.branch()))
1405 self.vfs.write("journal.desc",
1405 self.vfs.write("journal.desc",
1406 "%d\n%s\n" % (len(self), desc))
1406 "%d\n%s\n" % (len(self), desc))
1407 self.vfs.write("journal.bookmarks",
1407 self.vfs.write("journal.bookmarks",
1408 self.vfs.tryread("bookmarks"))
1408 self.vfs.tryread("bookmarks"))
1409 self.svfs.write("journal.phaseroots",
1409 self.svfs.write("journal.phaseroots",
1410 self.svfs.tryread("phaseroots"))
1410 self.svfs.tryread("phaseroots"))
1411
1411
1412 def recover(self):
1412 def recover(self):
1413 with self.lock():
1413 with self.lock():
1414 if self.svfs.exists("journal"):
1414 if self.svfs.exists("journal"):
1415 self.ui.status(_("rolling back interrupted transaction\n"))
1415 self.ui.status(_("rolling back interrupted transaction\n"))
1416 vfsmap = {'': self.svfs,
1416 vfsmap = {'': self.svfs,
1417 'plain': self.vfs,}
1417 'plain': self.vfs,}
1418 transaction.rollback(self.svfs, vfsmap, "journal",
1418 transaction.rollback(self.svfs, vfsmap, "journal",
1419 self.ui.warn,
1419 self.ui.warn,
1420 checkambigfiles=_cachedfiles)
1420 checkambigfiles=_cachedfiles)
1421 self.invalidate()
1421 self.invalidate()
1422 return True
1422 return True
1423 else:
1423 else:
1424 self.ui.warn(_("no interrupted transaction available\n"))
1424 self.ui.warn(_("no interrupted transaction available\n"))
1425 return False
1425 return False
1426
1426
1427 def rollback(self, dryrun=False, force=False):
1427 def rollback(self, dryrun=False, force=False):
1428 wlock = lock = dsguard = None
1428 wlock = lock = dsguard = None
1429 try:
1429 try:
1430 wlock = self.wlock()
1430 wlock = self.wlock()
1431 lock = self.lock()
1431 lock = self.lock()
1432 if self.svfs.exists("undo"):
1432 if self.svfs.exists("undo"):
1433 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1433 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1434
1434
1435 return self._rollback(dryrun, force, dsguard)
1435 return self._rollback(dryrun, force, dsguard)
1436 else:
1436 else:
1437 self.ui.warn(_("no rollback information available\n"))
1437 self.ui.warn(_("no rollback information available\n"))
1438 return 1
1438 return 1
1439 finally:
1439 finally:
1440 release(dsguard, lock, wlock)
1440 release(dsguard, lock, wlock)
1441
1441
1442 @unfilteredmethod # Until we get smarter cache management
1442 @unfilteredmethod # Until we get smarter cache management
1443 def _rollback(self, dryrun, force, dsguard):
1443 def _rollback(self, dryrun, force, dsguard):
1444 ui = self.ui
1444 ui = self.ui
1445 try:
1445 try:
1446 args = self.vfs.read('undo.desc').splitlines()
1446 args = self.vfs.read('undo.desc').splitlines()
1447 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1447 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1448 if len(args) >= 3:
1448 if len(args) >= 3:
1449 detail = args[2]
1449 detail = args[2]
1450 oldtip = oldlen - 1
1450 oldtip = oldlen - 1
1451
1451
1452 if detail and ui.verbose:
1452 if detail and ui.verbose:
1453 msg = (_('repository tip rolled back to revision %d'
1453 msg = (_('repository tip rolled back to revision %d'
1454 ' (undo %s: %s)\n')
1454 ' (undo %s: %s)\n')
1455 % (oldtip, desc, detail))
1455 % (oldtip, desc, detail))
1456 else:
1456 else:
1457 msg = (_('repository tip rolled back to revision %d'
1457 msg = (_('repository tip rolled back to revision %d'
1458 ' (undo %s)\n')
1458 ' (undo %s)\n')
1459 % (oldtip, desc))
1459 % (oldtip, desc))
1460 except IOError:
1460 except IOError:
1461 msg = _('rolling back unknown transaction\n')
1461 msg = _('rolling back unknown transaction\n')
1462 desc = None
1462 desc = None
1463
1463
1464 if not force and self['.'] != self['tip'] and desc == 'commit':
1464 if not force and self['.'] != self['tip'] and desc == 'commit':
1465 raise error.Abort(
1465 raise error.Abort(
1466 _('rollback of last commit while not checked out '
1466 _('rollback of last commit while not checked out '
1467 'may lose data'), hint=_('use -f to force'))
1467 'may lose data'), hint=_('use -f to force'))
1468
1468
1469 ui.status(msg)
1469 ui.status(msg)
1470 if dryrun:
1470 if dryrun:
1471 return 0
1471 return 0
1472
1472
1473 parents = self.dirstate.parents()
1473 parents = self.dirstate.parents()
1474 self.destroying()
1474 self.destroying()
1475 vfsmap = {'plain': self.vfs, '': self.svfs}
1475 vfsmap = {'plain': self.vfs, '': self.svfs}
1476 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1476 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1477 checkambigfiles=_cachedfiles)
1477 checkambigfiles=_cachedfiles)
1478 if self.vfs.exists('undo.bookmarks'):
1478 if self.vfs.exists('undo.bookmarks'):
1479 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1479 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1480 if self.svfs.exists('undo.phaseroots'):
1480 if self.svfs.exists('undo.phaseroots'):
1481 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1481 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1482 self.invalidate()
1482 self.invalidate()
1483
1483
1484 parentgone = (parents[0] not in self.changelog.nodemap or
1484 parentgone = (parents[0] not in self.changelog.nodemap or
1485 parents[1] not in self.changelog.nodemap)
1485 parents[1] not in self.changelog.nodemap)
1486 if parentgone:
1486 if parentgone:
1487 # prevent dirstateguard from overwriting already restored one
1487 # prevent dirstateguard from overwriting already restored one
1488 dsguard.close()
1488 dsguard.close()
1489
1489
1490 self.dirstate.restorebackup(None, 'undo.dirstate')
1490 self.dirstate.restorebackup(None, 'undo.dirstate')
1491 try:
1491 try:
1492 branch = self.vfs.read('undo.branch')
1492 branch = self.vfs.read('undo.branch')
1493 self.dirstate.setbranch(encoding.tolocal(branch))
1493 self.dirstate.setbranch(encoding.tolocal(branch))
1494 except IOError:
1494 except IOError:
1495 ui.warn(_('named branch could not be reset: '
1495 ui.warn(_('named branch could not be reset: '
1496 'current branch is still \'%s\'\n')
1496 'current branch is still \'%s\'\n')
1497 % self.dirstate.branch())
1497 % self.dirstate.branch())
1498
1498
1499 parents = tuple([p.rev() for p in self[None].parents()])
1499 parents = tuple([p.rev() for p in self[None].parents()])
1500 if len(parents) > 1:
1500 if len(parents) > 1:
1501 ui.status(_('working directory now based on '
1501 ui.status(_('working directory now based on '
1502 'revisions %d and %d\n') % parents)
1502 'revisions %d and %d\n') % parents)
1503 else:
1503 else:
1504 ui.status(_('working directory now based on '
1504 ui.status(_('working directory now based on '
1505 'revision %d\n') % parents)
1505 'revision %d\n') % parents)
1506 mergemod.mergestate.clean(self, self['.'].node())
1506 mergemod.mergestate.clean(self, self['.'].node())
1507
1507
1508 # TODO: if we know which new heads may result from this rollback, pass
1508 # TODO: if we know which new heads may result from this rollback, pass
1509 # them to destroy(), which will prevent the branchhead cache from being
1509 # them to destroy(), which will prevent the branchhead cache from being
1510 # invalidated.
1510 # invalidated.
1511 self.destroyed()
1511 self.destroyed()
1512 return 0
1512 return 0
1513
1513
1514 def _buildcacheupdater(self, newtransaction):
1514 def _buildcacheupdater(self, newtransaction):
1515 """called during transaction to build the callback updating cache
1515 """called during transaction to build the callback updating cache
1516
1516
1517 Lives on the repository to help extensions that might want to augment
1517 Lives on the repository to help extensions that might want to augment
1518 this logic. For this purpose, the created transaction is passed to the
1518 this logic. For this purpose, the created transaction is passed to the
1519 method.
1519 method.
1520 """
1520 """
1521 # we must avoid cyclic reference between repo and transaction.
1521 # we must avoid cyclic reference between repo and transaction.
1522 reporef = weakref.ref(self)
1522 reporef = weakref.ref(self)
1523 def updater(tr):
1523 def updater(tr):
1524 repo = reporef()
1524 repo = reporef()
1525 repo.updatecaches(tr)
1525 repo.updatecaches(tr)
1526 return updater
1526 return updater
1527
1527
1528 @unfilteredmethod
1528 @unfilteredmethod
1529 def updatecaches(self, tr=None, full=False):
1529 def updatecaches(self, tr=None, full=False):
1530 """warm appropriate caches
1530 """warm appropriate caches
1531
1531
1532 If this function is called after a transaction closed, the transaction
1532 If this function is called after a transaction closed, the transaction
1533 will be available in the 'tr' argument. This can be used to selectively
1533 will be available in the 'tr' argument. This can be used to selectively
1534 update caches relevant to the changes in that transaction.
1534 update caches relevant to the changes in that transaction.
1535
1535
1536 If 'full' is set, make sure all caches the function knows about have
1536 If 'full' is set, make sure all caches the function knows about have
1537 up-to-date data. Even the ones usually loaded more lazily.
1537 up-to-date data. Even the ones usually loaded more lazily.
1538 """
1538 """
1539 if tr is not None and tr.hookargs.get('source') == 'strip':
1539 if tr is not None and tr.hookargs.get('source') == 'strip':
1540 # During strip, many caches are invalid but
1540 # During strip, many caches are invalid but
1541 # later call to `destroyed` will refresh them.
1541 # later call to `destroyed` will refresh them.
1542 return
1542 return
1543
1543
1544 if tr is None or tr.changes['revs']:
1544 if tr is None or tr.changes['revs']:
1545 # updating the unfiltered branchmap should refresh all the others,
1545 # updating the unfiltered branchmap should refresh all the others,
1546 self.ui.debug('updating the branch cache\n')
1546 self.ui.debug('updating the branch cache\n')
1547 branchmap.updatecache(self.filtered('served'))
1547 branchmap.updatecache(self.filtered('served'))
1548
1548
1549 if full:
1549 if full:
1550 rbc = self.revbranchcache()
1550 rbc = self.revbranchcache()
1551 for r in self.changelog:
1551 for r in self.changelog:
1552 rbc.branchinfo(r)
1552 rbc.branchinfo(r)
1553 rbc.write()
1553 rbc.write()
1554
1554
1555 def invalidatecaches(self):
1555 def invalidatecaches(self):
1556
1556
1557 if '_tagscache' in vars(self):
1557 if '_tagscache' in vars(self):
1558 # can't use delattr on proxy
1558 # can't use delattr on proxy
1559 del self.__dict__['_tagscache']
1559 del self.__dict__['_tagscache']
1560
1560
1561 self.unfiltered()._branchcaches.clear()
1561 self.unfiltered()._branchcaches.clear()
1562 self.invalidatevolatilesets()
1562 self.invalidatevolatilesets()
1563 self._sparsesignaturecache.clear()
1563 self._sparsesignaturecache.clear()
1564
1564
1565 def invalidatevolatilesets(self):
1565 def invalidatevolatilesets(self):
1566 self.filteredrevcache.clear()
1566 self.filteredrevcache.clear()
1567 obsolete.clearobscaches(self)
1567 obsolete.clearobscaches(self)
1568
1568
1569 def invalidatedirstate(self):
1569 def invalidatedirstate(self):
1570 '''Invalidates the dirstate, causing the next call to dirstate
1570 '''Invalidates the dirstate, causing the next call to dirstate
1571 to check if it was modified since the last time it was read,
1571 to check if it was modified since the last time it was read,
1572 rereading it if it has.
1572 rereading it if it has.
1573
1573
1574 This is different from dirstate.invalidate() in that it doesn't always
1574 This is different from dirstate.invalidate() in that it doesn't always
1575 reread the dirstate. Use dirstate.invalidate() if you want to
1575 reread the dirstate. Use dirstate.invalidate() if you want to
1576 explicitly read the dirstate again (i.e. restoring it to a previous
1576 explicitly read the dirstate again (i.e. restoring it to a previous
1577 known good state).'''
1577 known good state).'''
1578 if hasunfilteredcache(self, 'dirstate'):
1578 if hasunfilteredcache(self, 'dirstate'):
1579 for k in self.dirstate._filecache:
1579 for k in self.dirstate._filecache:
1580 try:
1580 try:
1581 delattr(self.dirstate, k)
1581 delattr(self.dirstate, k)
1582 except AttributeError:
1582 except AttributeError:
1583 pass
1583 pass
1584 delattr(self.unfiltered(), 'dirstate')
1584 delattr(self.unfiltered(), 'dirstate')
1585
1585
1586 def invalidate(self, clearfilecache=False):
1586 def invalidate(self, clearfilecache=False):
1587 '''Invalidates both store and non-store parts other than dirstate
1587 '''Invalidates both store and non-store parts other than dirstate
1588
1588
1589 If a transaction is running, invalidation of store is omitted,
1589 If a transaction is running, invalidation of store is omitted,
1590 because discarding in-memory changes might cause inconsistency
1590 because discarding in-memory changes might cause inconsistency
1591 (e.g. incomplete fncache causes unintentional failure, but
1591 (e.g. incomplete fncache causes unintentional failure, but
1592 redundant one doesn't).
1592 redundant one doesn't).
1593 '''
1593 '''
1594 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1594 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1595 for k in list(self._filecache.keys()):
1595 for k in list(self._filecache.keys()):
1596 # dirstate is invalidated separately in invalidatedirstate()
1596 # dirstate is invalidated separately in invalidatedirstate()
1597 if k == 'dirstate':
1597 if k == 'dirstate':
1598 continue
1598 continue
1599 if (k == 'changelog' and
1599 if (k == 'changelog' and
1600 self.currenttransaction() and
1600 self.currenttransaction() and
1601 self.changelog._delayed):
1601 self.changelog._delayed):
1602 # The changelog object may store unwritten revisions. We don't
1602 # The changelog object may store unwritten revisions. We don't
1603 # want to lose them.
1603 # want to lose them.
1604 # TODO: Solve the problem instead of working around it.
1604 # TODO: Solve the problem instead of working around it.
1605 continue
1605 continue
1606
1606
1607 if clearfilecache:
1607 if clearfilecache:
1608 del self._filecache[k]
1608 del self._filecache[k]
1609 try:
1609 try:
1610 delattr(unfiltered, k)
1610 delattr(unfiltered, k)
1611 except AttributeError:
1611 except AttributeError:
1612 pass
1612 pass
1613 self.invalidatecaches()
1613 self.invalidatecaches()
1614 if not self.currenttransaction():
1614 if not self.currenttransaction():
1615 # TODO: Changing contents of store outside transaction
1615 # TODO: Changing contents of store outside transaction
1616 # causes inconsistency. We should make in-memory store
1616 # causes inconsistency. We should make in-memory store
1617 # changes detectable, and abort if changed.
1617 # changes detectable, and abort if changed.
1618 self.store.invalidatecaches()
1618 self.store.invalidatecaches()
1619
1619
1620 def invalidateall(self):
1620 def invalidateall(self):
1621 '''Fully invalidates both store and non-store parts, causing the
1621 '''Fully invalidates both store and non-store parts, causing the
1622 subsequent operation to reread any outside changes.'''
1622 subsequent operation to reread any outside changes.'''
1623 # extension should hook this to invalidate its caches
1623 # extension should hook this to invalidate its caches
1624 self.invalidate()
1624 self.invalidate()
1625 self.invalidatedirstate()
1625 self.invalidatedirstate()
1626
1626
1627 @unfilteredmethod
1627 @unfilteredmethod
1628 def _refreshfilecachestats(self, tr):
1628 def _refreshfilecachestats(self, tr):
1629 """Reload stats of cached files so that they are flagged as valid"""
1629 """Reload stats of cached files so that they are flagged as valid"""
1630 for k, ce in self._filecache.items():
1630 for k, ce in self._filecache.items():
1631 k = pycompat.sysstr(k)
1631 k = pycompat.sysstr(k)
1632 if k == r'dirstate' or k not in self.__dict__:
1632 if k == r'dirstate' or k not in self.__dict__:
1633 continue
1633 continue
1634 ce.refresh()
1634 ce.refresh()
1635
1635
1636 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1636 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1637 inheritchecker=None, parentenvvar=None):
1637 inheritchecker=None, parentenvvar=None):
1638 parentlock = None
1638 parentlock = None
1639 # the contents of parentenvvar are used by the underlying lock to
1639 # the contents of parentenvvar are used by the underlying lock to
1640 # determine whether it can be inherited
1640 # determine whether it can be inherited
1641 if parentenvvar is not None:
1641 if parentenvvar is not None:
1642 parentlock = encoding.environ.get(parentenvvar)
1642 parentlock = encoding.environ.get(parentenvvar)
1643
1643
1644 timeout = 0
1644 timeout = 0
1645 warntimeout = 0
1645 warntimeout = 0
1646 if wait:
1646 if wait:
1647 timeout = self.ui.configint("ui", "timeout")
1647 timeout = self.ui.configint("ui", "timeout")
1648 warntimeout = self.ui.configint("ui", "timeout.warn")
1648 warntimeout = self.ui.configint("ui", "timeout.warn")
1649
1649
1650 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1650 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1651 releasefn=releasefn,
1651 releasefn=releasefn,
1652 acquirefn=acquirefn, desc=desc,
1652 acquirefn=acquirefn, desc=desc,
1653 inheritchecker=inheritchecker,
1653 inheritchecker=inheritchecker,
1654 parentlock=parentlock)
1654 parentlock=parentlock)
1655 return l
1655 return l
1656
1656
1657 def _afterlock(self, callback):
1657 def _afterlock(self, callback):
1658 """add a callback to be run when the repository is fully unlocked
1658 """add a callback to be run when the repository is fully unlocked
1659
1659
1660 The callback will be executed when the outermost lock is released
1660 The callback will be executed when the outermost lock is released
1661 (with wlock being higher level than 'lock')."""
1661 (with wlock being higher level than 'lock')."""
1662 for ref in (self._wlockref, self._lockref):
1662 for ref in (self._wlockref, self._lockref):
1663 l = ref and ref()
1663 l = ref and ref()
1664 if l and l.held:
1664 if l and l.held:
1665 l.postrelease.append(callback)
1665 l.postrelease.append(callback)
1666 break
1666 break
1667 else: # no lock has been found.
1667 else: # no lock has been found.
1668 callback()
1668 callback()
1669
1669
1670 def lock(self, wait=True):
1670 def lock(self, wait=True):
1671 '''Lock the repository store (.hg/store) and return a weak reference
1671 '''Lock the repository store (.hg/store) and return a weak reference
1672 to the lock. Use this before modifying the store (e.g. committing or
1672 to the lock. Use this before modifying the store (e.g. committing or
1673 stripping). If you are opening a transaction, get a lock as well.
1673 stripping). If you are opening a transaction, get a lock as well.
1674
1674
1675 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1675 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1676 'wlock' first to avoid a dead-lock hazard.'''
1676 'wlock' first to avoid a dead-lock hazard.'''
1677 l = self._currentlock(self._lockref)
1677 l = self._currentlock(self._lockref)
1678 if l is not None:
1678 if l is not None:
1679 l.lock()
1679 l.lock()
1680 return l
1680 return l
1681
1681
1682 l = self._lock(self.svfs, "lock", wait, None,
1682 l = self._lock(self.svfs, "lock", wait, None,
1683 self.invalidate, _('repository %s') % self.origroot)
1683 self.invalidate, _('repository %s') % self.origroot)
1684 self._lockref = weakref.ref(l)
1684 self._lockref = weakref.ref(l)
1685 return l
1685 return l
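# Illustrative sketch (not part of this module): callers needing both
# locks acquire 'wlock' before 'lock' and release them in reverse order,
# mirroring the pattern used by commit() below.
def _lockedoperation(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # ... modify the store and the working copy here ...
    finally:
        release(lock, wlock)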
1686
1686
1687 def _wlockchecktransaction(self):
1687 def _wlockchecktransaction(self):
1688 if self.currenttransaction() is not None:
1688 if self.currenttransaction() is not None:
1689 raise error.LockInheritanceContractViolation(
1689 raise error.LockInheritanceContractViolation(
1690 'wlock cannot be inherited in the middle of a transaction')
1690 'wlock cannot be inherited in the middle of a transaction')
1691
1691
1692 def wlock(self, wait=True):
1692 def wlock(self, wait=True):
1693 '''Lock the non-store parts of the repository (everything under
1693 '''Lock the non-store parts of the repository (everything under
1694 .hg except .hg/store) and return a weak reference to the lock.
1694 .hg except .hg/store) and return a weak reference to the lock.
1695
1695
1696 Use this before modifying files in .hg.
1696 Use this before modifying files in .hg.
1697
1697
1698 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1699 'wlock' first to avoid a dead-lock hazard.'''
1700 l = self._wlockref and self._wlockref()
1700 l = self._wlockref and self._wlockref()
1701 if l is not None and l.held:
1701 if l is not None and l.held:
1702 l.lock()
1702 l.lock()
1703 return l
1703 return l
1704
1704
1705 # We do not need to check for non-waiting lock acquisition. Such
1706 # acquisition would not cause a dead-lock as it would just fail.
1707 if wait and (self.ui.configbool('devel', 'all-warnings')
1707 if wait and (self.ui.configbool('devel', 'all-warnings')
1708 or self.ui.configbool('devel', 'check-locks')):
1708 or self.ui.configbool('devel', 'check-locks')):
1709 if self._currentlock(self._lockref) is not None:
1709 if self._currentlock(self._lockref) is not None:
1710 self.ui.develwarn('"wlock" acquired after "lock"')
1710 self.ui.develwarn('"wlock" acquired after "lock"')
1711
1711
1712 def unlock():
1712 def unlock():
1713 if self.dirstate.pendingparentchange():
1713 if self.dirstate.pendingparentchange():
1714 self.dirstate.invalidate()
1714 self.dirstate.invalidate()
1715 else:
1715 else:
1716 self.dirstate.write(None)
1716 self.dirstate.write(None)
1717
1717
1718 self._filecache['dirstate'].refresh()
1718 self._filecache['dirstate'].refresh()
1719
1719
1720 l = self._lock(self.vfs, "wlock", wait, unlock,
1720 l = self._lock(self.vfs, "wlock", wait, unlock,
1721 self.invalidatedirstate, _('working directory of %s') %
1721 self.invalidatedirstate, _('working directory of %s') %
1722 self.origroot,
1722 self.origroot,
1723 inheritchecker=self._wlockchecktransaction,
1723 inheritchecker=self._wlockchecktransaction,
1724 parentenvvar='HG_WLOCK_LOCKER')
1724 parentenvvar='HG_WLOCK_LOCKER')
1725 self._wlockref = weakref.ref(l)
1725 self._wlockref = weakref.ref(l)
1726 return l
1726 return l
1727
1727
1728 def _currentlock(self, lockref):
1728 def _currentlock(self, lockref):
1729 """Returns the lock if it's held, or None if it's not."""
1729 """Returns the lock if it's held, or None if it's not."""
1730 if lockref is None:
1730 if lockref is None:
1731 return None
1731 return None
1732 l = lockref()
1732 l = lockref()
1733 if l is None or not l.held:
1733 if l is None or not l.held:
1734 return None
1734 return None
1735 return l
1735 return l
1736
1736
1737 def currentwlock(self):
1737 def currentwlock(self):
1738 """Returns the wlock if it's held, or None if it's not."""
1738 """Returns the wlock if it's held, or None if it's not."""
1739 return self._currentlock(self._wlockref)
1739 return self._currentlock(self._wlockref)
1740
1740
1741 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1741 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1742 """
1742 """
1743 commit an individual file as part of a larger transaction
1743 commit an individual file as part of a larger transaction
1744 """
1744 """
1745
1745
1746 fname = fctx.path()
1746 fname = fctx.path()
1747 fparent1 = manifest1.get(fname, nullid)
1747 fparent1 = manifest1.get(fname, nullid)
1748 fparent2 = manifest2.get(fname, nullid)
1748 fparent2 = manifest2.get(fname, nullid)
1749 if isinstance(fctx, context.filectx):
1749 if isinstance(fctx, context.filectx):
1750 node = fctx.filenode()
1750 node = fctx.filenode()
1751 if node in [fparent1, fparent2]:
1751 if node in [fparent1, fparent2]:
1752 self.ui.debug('reusing %s filelog entry\n' % fname)
1752 self.ui.debug('reusing %s filelog entry\n' % fname)
1753 if manifest1.flags(fname) != fctx.flags():
1753 if manifest1.flags(fname) != fctx.flags():
1754 changelist.append(fname)
1754 changelist.append(fname)
1755 return node
1755 return node
1756
1756
1757 flog = self.file(fname)
1757 flog = self.file(fname)
1758 meta = {}
1758 meta = {}
1759 copy = fctx.renamed()
1759 copy = fctx.renamed()
1760 if copy and copy[0] != fname:
1760 if copy and copy[0] != fname:
1761 # Mark the new revision of this file as a copy of another
1761 # Mark the new revision of this file as a copy of another
1762 # file. This copy data will effectively act as a parent
1762 # file. This copy data will effectively act as a parent
1763 # of this new revision. If this is a merge, the first
1763 # of this new revision. If this is a merge, the first
1764 # parent will be the nullid (meaning "look up the copy data")
1764 # parent will be the nullid (meaning "look up the copy data")
1765 # and the second one will be the other parent. For example:
1765 # and the second one will be the other parent. For example:
1766 #
1766 #
1767 # 0 --- 1 --- 3 rev1 changes file foo
1767 # 0 --- 1 --- 3 rev1 changes file foo
1768 # \ / rev2 renames foo to bar and changes it
1768 # \ / rev2 renames foo to bar and changes it
1769 # \- 2 -/ rev3 should have bar with all changes and
1769 # \- 2 -/ rev3 should have bar with all changes and
1770 # should record that bar descends from
1770 # should record that bar descends from
1771 # bar in rev2 and foo in rev1
1771 # bar in rev2 and foo in rev1
1772 #
1772 #
1773 # this allows this merge to succeed:
1773 # this allows this merge to succeed:
1774 #
1774 #
1775 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1775 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1776 # \ / merging rev3 and rev4 should use bar@rev2
1776 # \ / merging rev3 and rev4 should use bar@rev2
1777 # \- 2 --- 4 as the merge base
1777 # \- 2 --- 4 as the merge base
1778 #
1778 #
1779
1779
1780 cfname = copy[0]
1780 cfname = copy[0]
1781 crev = manifest1.get(cfname)
1781 crev = manifest1.get(cfname)
1782 newfparent = fparent2
1782 newfparent = fparent2
1783
1783
1784 if manifest2: # branch merge
1784 if manifest2: # branch merge
1785 if fparent2 == nullid or crev is None: # copied on remote side
1785 if fparent2 == nullid or crev is None: # copied on remote side
1786 if cfname in manifest2:
1786 if cfname in manifest2:
1787 crev = manifest2[cfname]
1787 crev = manifest2[cfname]
1788 newfparent = fparent1
1788 newfparent = fparent1
1789
1789
1790 # Here, we used to search backwards through history to try to find
1790 # Here, we used to search backwards through history to try to find
1791 # where the file copy came from if the source of a copy was not in
1791 # where the file copy came from if the source of a copy was not in
1792 # the parent directory. However, this doesn't actually make sense to
1792 # the parent directory. However, this doesn't actually make sense to
1793 # do (what does a copy from something not in your working copy even
1793 # do (what does a copy from something not in your working copy even
1794 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1794 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1795 # the user that copy information was dropped, so if they didn't
1795 # the user that copy information was dropped, so if they didn't
1796 # expect this outcome it can be fixed, but this is the correct
1796 # expect this outcome it can be fixed, but this is the correct
1797 # behavior in this circumstance.
1797 # behavior in this circumstance.
1798
1798
1799 if crev:
1799 if crev:
1800 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1800 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1801 meta["copy"] = cfname
1801 meta["copy"] = cfname
1802 meta["copyrev"] = hex(crev)
1802 meta["copyrev"] = hex(crev)
1803 fparent1, fparent2 = nullid, newfparent
1803 fparent1, fparent2 = nullid, newfparent
1804 else:
1804 else:
1805 self.ui.warn(_("warning: can't find ancestor for '%s' "
1805 self.ui.warn(_("warning: can't find ancestor for '%s' "
1806 "copied from '%s'!\n") % (fname, cfname))
1806 "copied from '%s'!\n") % (fname, cfname))
1807
1807
1808 elif fparent1 == nullid:
1808 elif fparent1 == nullid:
1809 fparent1, fparent2 = fparent2, nullid
1809 fparent1, fparent2 = fparent2, nullid
1810 elif fparent2 != nullid:
1810 elif fparent2 != nullid:
1811 # is one parent an ancestor of the other?
1811 # is one parent an ancestor of the other?
1812 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1812 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1813 if fparent1 in fparentancestors:
1813 if fparent1 in fparentancestors:
1814 fparent1, fparent2 = fparent2, nullid
1814 fparent1, fparent2 = fparent2, nullid
1815 elif fparent2 in fparentancestors:
1815 elif fparent2 in fparentancestors:
1816 fparent2 = nullid
1816 fparent2 = nullid
1817
1817
1818 # is the file changed?
1818 # is the file changed?
1819 text = fctx.data()
1819 text = fctx.data()
1820 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1820 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1821 changelist.append(fname)
1821 changelist.append(fname)
1822 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1822 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1823 # are just the flags changed during merge?
1823 # are just the flags changed during merge?
1824 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1824 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1825 changelist.append(fname)
1825 changelist.append(fname)
1826
1826
1827 return fparent1
1827 return fparent1
1828
1828
1829 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1829 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1830 """check for commit arguments that aren't committable"""
1830 """check for commit arguments that aren't committable"""
1831 if match.isexact() or match.prefix():
1831 if match.isexact() or match.prefix():
1832 matched = set(status.modified + status.added + status.removed)
1832 matched = set(status.modified + status.added + status.removed)
1833
1833
1834 for f in match.files():
1834 for f in match.files():
1835 f = self.dirstate.normalize(f)
1835 f = self.dirstate.normalize(f)
1836 if f == '.' or f in matched or f in wctx.substate:
1836 if f == '.' or f in matched or f in wctx.substate:
1837 continue
1837 continue
1838 if f in status.deleted:
1838 if f in status.deleted:
1839 fail(f, _('file not found!'))
1839 fail(f, _('file not found!'))
1840 if f in vdirs: # visited directory
1840 if f in vdirs: # visited directory
1841 d = f + '/'
1841 d = f + '/'
1842 for mf in matched:
1842 for mf in matched:
1843 if mf.startswith(d):
1843 if mf.startswith(d):
1844 break
1844 break
1845 else:
1845 else:
1846 fail(f, _("no match under directory!"))
1846 fail(f, _("no match under directory!"))
1847 elif f not in self.dirstate:
1847 elif f not in self.dirstate:
1848 fail(f, _("file not tracked!"))
1848 fail(f, _("file not tracked!"))
1849
1849
1850 @unfilteredmethod
1850 @unfilteredmethod
1851 def commit(self, text="", user=None, date=None, match=None, force=False,
1851 def commit(self, text="", user=None, date=None, match=None, force=False,
1852 editor=False, extra=None):
1852 editor=False, extra=None):
1853 """Add a new revision to current repository.
1853 """Add a new revision to current repository.
1854
1854
1855 Revision information is gathered from the working directory;
1856 match can be used to filter the committed files. If editor is
1857 supplied, it is called to get a commit message.
1858 """
1858 """
1859 if extra is None:
1859 if extra is None:
1860 extra = {}
1860 extra = {}
1861
1861
1862 def fail(f, msg):
1862 def fail(f, msg):
1863 raise error.Abort('%s: %s' % (f, msg))
1863 raise error.Abort('%s: %s' % (f, msg))
1864
1864
1865 if not match:
1865 if not match:
1866 match = matchmod.always(self.root, '')
1866 match = matchmod.always(self.root, '')
1867
1867
1868 if not force:
1868 if not force:
1869 vdirs = []
1869 vdirs = []
1870 match.explicitdir = vdirs.append
1870 match.explicitdir = vdirs.append
1871 match.bad = fail
1871 match.bad = fail
1872
1872
1873 wlock = lock = tr = None
1873 wlock = lock = tr = None
1874 try:
1874 try:
1875 wlock = self.wlock()
1875 wlock = self.wlock()
1876 lock = self.lock() # for recent changelog (see issue4368)
1876 lock = self.lock() # for recent changelog (see issue4368)
1877
1877
1878 wctx = self[None]
1878 wctx = self[None]
1879 merge = len(wctx.parents()) > 1
1879 merge = len(wctx.parents()) > 1
1880
1880
1881 if not force and merge and not match.always():
1881 if not force and merge and not match.always():
1882 raise error.Abort(_('cannot partially commit a merge '
1882 raise error.Abort(_('cannot partially commit a merge '
1883 '(do not specify files or patterns)'))
1883 '(do not specify files or patterns)'))
1884
1884
1885 status = self.status(match=match, clean=force)
1885 status = self.status(match=match, clean=force)
1886 if force:
1886 if force:
1887 status.modified.extend(status.clean) # mq may commit clean files
1887 status.modified.extend(status.clean) # mq may commit clean files
1888
1888
1889 # check subrepos
1889 # check subrepos
1890 subs, commitsubs, newstate = subrepoutil.precommit(
1890 subs, commitsubs, newstate = subrepoutil.precommit(
1891 self.ui, wctx, status, match, force=force)
1891 self.ui, wctx, status, match, force=force)
1892
1892
1893 # make sure all explicit patterns are matched
1893 # make sure all explicit patterns are matched
1894 if not force:
1894 if not force:
1895 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1895 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1896
1896
1897 cctx = context.workingcommitctx(self, status,
1897 cctx = context.workingcommitctx(self, status,
1898 text, user, date, extra)
1898 text, user, date, extra)
1899
1899
1900 # internal config: ui.allowemptycommit
1900 # internal config: ui.allowemptycommit
1901 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1901 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1902 or extra.get('close') or merge or cctx.files()
1902 or extra.get('close') or merge or cctx.files()
1903 or self.ui.configbool('ui', 'allowemptycommit'))
1903 or self.ui.configbool('ui', 'allowemptycommit'))
1904 if not allowemptycommit:
1904 if not allowemptycommit:
1905 return None
1905 return None
1906
1906
1907 if merge and cctx.deleted():
1907 if merge and cctx.deleted():
1908 raise error.Abort(_("cannot commit merge with missing files"))
1908 raise error.Abort(_("cannot commit merge with missing files"))
1909
1909
1910 ms = mergemod.mergestate.read(self)
1910 ms = mergemod.mergestate.read(self)
1911 mergeutil.checkunresolved(ms)
1911 mergeutil.checkunresolved(ms)
1912
1912
1913 if editor:
1913 if editor:
1914 cctx._text = editor(self, cctx, subs)
1914 cctx._text = editor(self, cctx, subs)
1915 edited = (text != cctx._text)
1915 edited = (text != cctx._text)
1916
1916
1917 # Save commit message in case this transaction gets rolled back
1917 # Save commit message in case this transaction gets rolled back
1918 # (e.g. by a pretxncommit hook). Leave the content alone on
1918 # (e.g. by a pretxncommit hook). Leave the content alone on
1919 # the assumption that the user will use the same editor again.
1919 # the assumption that the user will use the same editor again.
1920 msgfn = self.savecommitmessage(cctx._text)
1920 msgfn = self.savecommitmessage(cctx._text)
1921
1921
1922 # commit subs and write new state
1922 # commit subs and write new state
1923 if subs:
1923 if subs:
1924 for s in sorted(commitsubs):
1924 for s in sorted(commitsubs):
1925 sub = wctx.sub(s)
1925 sub = wctx.sub(s)
1926 self.ui.status(_('committing subrepository %s\n') %
1926 self.ui.status(_('committing subrepository %s\n') %
1927 subrepoutil.subrelpath(sub))
1927 subrepoutil.subrelpath(sub))
1928 sr = sub.commit(cctx._text, user, date)
1928 sr = sub.commit(cctx._text, user, date)
1929 newstate[s] = (newstate[s][0], sr)
1929 newstate[s] = (newstate[s][0], sr)
1930 subrepoutil.writestate(self, newstate)
1930 subrepoutil.writestate(self, newstate)
1931
1931
1932 p1, p2 = self.dirstate.parents()
1932 p1, p2 = self.dirstate.parents()
1933 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1933 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1934 try:
1934 try:
1935 self.hook("precommit", throw=True, parent1=hookp1,
1935 self.hook("precommit", throw=True, parent1=hookp1,
1936 parent2=hookp2)
1936 parent2=hookp2)
1937 tr = self.transaction('commit')
1937 tr = self.transaction('commit')
1938 ret = self.commitctx(cctx, True)
1938 ret = self.commitctx(cctx, True)
1939 except: # re-raises
1939 except: # re-raises
1940 if edited:
1940 if edited:
1941 self.ui.write(
1941 self.ui.write(
1942 _('note: commit message saved in %s\n') % msgfn)
1942 _('note: commit message saved in %s\n') % msgfn)
1943 raise
1943 raise
1944 # update bookmarks, dirstate and mergestate
1944 # update bookmarks, dirstate and mergestate
1945 bookmarks.update(self, [p1, p2], ret)
1945 bookmarks.update(self, [p1, p2], ret)
1946 cctx.markcommitted(ret)
1946 cctx.markcommitted(ret)
1947 ms.reset()
1947 ms.reset()
1948 tr.close()
1948 tr.close()
1949
1949
1950 finally:
1950 finally:
1951 lockmod.release(tr, lock, wlock)
1951 lockmod.release(tr, lock, wlock)
1952
1952
1953 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1953 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1954 # hack for commands that use a temporary commit (e.g. histedit):
1955 # the temporary commit may have been stripped before the hook runs
1956 if self.changelog.hasnode(ret):
1956 if self.changelog.hasnode(ret):
1957 self.hook("commit", node=node, parent1=parent1,
1957 self.hook("commit", node=node, parent1=parent1,
1958 parent2=parent2)
1958 parent2=parent2)
1959 self._afterlock(commithook)
1959 self._afterlock(commithook)
1960 return ret
1960 return ret
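# Illustrative sketch (not part of this module): creating a changeset
# from the working directory; the message and user are made up.
def _examplecommit(repo):
    node = repo.commit(text='fix a typo',
                       user='Example Hacker <hacker@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')
    return node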
1961
1961
1962 @unfilteredmethod
1962 @unfilteredmethod
1963 def commitctx(self, ctx, error=False):
1963 def commitctx(self, ctx, error=False):
1964 """Add a new revision to current repository.
1964 """Add a new revision to current repository.
1965 Revision information is passed via the context argument.
1965 Revision information is passed via the context argument.
1966 """
1966 """
1967
1967
1968 tr = None
1968 tr = None
1969 p1, p2 = ctx.p1(), ctx.p2()
1969 p1, p2 = ctx.p1(), ctx.p2()
1970 user = ctx.user()
1970 user = ctx.user()
1971
1971
1972 lock = self.lock()
1972 lock = self.lock()
1973 try:
1973 try:
1974 tr = self.transaction("commit")
1974 tr = self.transaction("commit")
1975 trp = weakref.proxy(tr)
1975 trp = weakref.proxy(tr)
1976
1976
1977 if ctx.manifestnode():
1977 if ctx.manifestnode():
1978 # reuse an existing manifest revision
1978 # reuse an existing manifest revision
1979 mn = ctx.manifestnode()
1979 mn = ctx.manifestnode()
1980 files = ctx.files()
1980 files = ctx.files()
1981 elif ctx.files():
1981 elif ctx.files():
1982 m1ctx = p1.manifestctx()
1982 m1ctx = p1.manifestctx()
1983 m2ctx = p2.manifestctx()
1983 m2ctx = p2.manifestctx()
1984 mctx = m1ctx.copy()
1984 mctx = m1ctx.copy()
1985
1985
1986 m = mctx.read()
1986 m = mctx.read()
1987 m1 = m1ctx.read()
1987 m1 = m1ctx.read()
1988 m2 = m2ctx.read()
1988 m2 = m2ctx.read()
1989
1989
1990 # check in files
1990 # check in files
1991 added = []
1991 added = []
1992 changed = []
1992 changed = []
1993 removed = list(ctx.removed())
1993 removed = list(ctx.removed())
1994 linkrev = len(self)
1994 linkrev = len(self)
1995 self.ui.note(_("committing files:\n"))
1995 self.ui.note(_("committing files:\n"))
1996 for f in sorted(ctx.modified() + ctx.added()):
1996 for f in sorted(ctx.modified() + ctx.added()):
1997 self.ui.note(f + "\n")
1997 self.ui.note(f + "\n")
1998 try:
1998 try:
1999 fctx = ctx[f]
1999 fctx = ctx[f]
2000 if fctx is None:
2000 if fctx is None:
2001 removed.append(f)
2001 removed.append(f)
2002 else:
2002 else:
2003 added.append(f)
2003 added.append(f)
2004 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2004 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2005 trp, changed)
2005 trp, changed)
2006 m.setflag(f, fctx.flags())
2006 m.setflag(f, fctx.flags())
2007 except OSError as inst:
2007 except OSError as inst:
2008 self.ui.warn(_("trouble committing %s!\n") % f)
2008 self.ui.warn(_("trouble committing %s!\n") % f)
2009 raise
2009 raise
2010 except IOError as inst:
2010 except IOError as inst:
2011 errcode = getattr(inst, 'errno', errno.ENOENT)
2011 errcode = getattr(inst, 'errno', errno.ENOENT)
2012 if error or errcode and errcode != errno.ENOENT:
2012 if error or errcode and errcode != errno.ENOENT:
2013 self.ui.warn(_("trouble committing %s!\n") % f)
2013 self.ui.warn(_("trouble committing %s!\n") % f)
2014 raise
2014 raise
2015
2015
2016 # update manifest
2016 # update manifest
2017 self.ui.note(_("committing manifest\n"))
2017 self.ui.note(_("committing manifest\n"))
2018 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2018 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2019 drop = [f for f in removed if f in m]
2019 drop = [f for f in removed if f in m]
2020 for f in drop:
2020 for f in drop:
2021 del m[f]
2021 del m[f]
2022 mn = mctx.write(trp, linkrev,
2022 mn = mctx.write(trp, linkrev,
2023 p1.manifestnode(), p2.manifestnode(),
2023 p1.manifestnode(), p2.manifestnode(),
2024 added, drop)
2024 added, drop)
2025 files = changed + removed
2025 files = changed + removed
2026 else:
2026 else:
2027 mn = p1.manifestnode()
2027 mn = p1.manifestnode()
2028 files = []
2028 files = []
2029
2029
2030 # update changelog
2030 # update changelog
2031 self.ui.note(_("committing changelog\n"))
2031 self.ui.note(_("committing changelog\n"))
2032 self.changelog.delayupdate(tr)
2032 self.changelog.delayupdate(tr)
2033 n = self.changelog.add(mn, files, ctx.description(),
2033 n = self.changelog.add(mn, files, ctx.description(),
2034 trp, p1.node(), p2.node(),
2034 trp, p1.node(), p2.node(),
2035 user, ctx.date(), ctx.extra().copy())
2035 user, ctx.date(), ctx.extra().copy())
2036 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2036 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2037 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2037 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2038 parent2=xp2)
2038 parent2=xp2)
2039 # set the new commit in its proper phase
2040 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2040 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2041 if targetphase:
2041 if targetphase:
2042 # retracting the boundary does not alter the parent changeset.
2043 # if a parent has a higher phase, the resulting phase will
2044 # be compliant anyway
2045 #
2045 #
2046 # if minimal phase was 0 we don't need to retract anything
2046 # if minimal phase was 0 we don't need to retract anything
2047 phases.registernew(self, tr, targetphase, [n])
2047 phases.registernew(self, tr, targetphase, [n])
2048 tr.close()
2048 tr.close()
2049 return n
2049 return n
2050 finally:
2050 finally:
2051 if tr:
2051 if tr:
2052 tr.release()
2052 tr.release()
2053 lock.release()
2053 lock.release()
2054
2054
2055 @unfilteredmethod
2055 @unfilteredmethod
2056 def destroying(self):
2056 def destroying(self):
2057 '''Inform the repository that nodes are about to be destroyed.
2057 '''Inform the repository that nodes are about to be destroyed.
2058 Intended for use by strip and rollback, so there's a common
2058 Intended for use by strip and rollback, so there's a common
2059 place for anything that has to be done before destroying history.
2059 place for anything that has to be done before destroying history.
2060
2060
2061 This is mostly useful for saving state that is in memory and waiting
2061 This is mostly useful for saving state that is in memory and waiting
2062 to be flushed when the current lock is released. Because a call to
2062 to be flushed when the current lock is released. Because a call to
2063 destroyed is imminent, the repo will be invalidated causing those
2063 destroyed is imminent, the repo will be invalidated causing those
2064 changes to stay in memory (waiting for the next unlock), or vanish
2064 changes to stay in memory (waiting for the next unlock), or vanish
2065 completely.
2065 completely.
2066 '''
2066 '''
2067 # When using the same lock to commit and strip, the phasecache is left
2067 # When using the same lock to commit and strip, the phasecache is left
2068 # dirty after committing. Then when we strip, the repo is invalidated,
2068 # dirty after committing. Then when we strip, the repo is invalidated,
2069 # causing those changes to disappear.
2069 # causing those changes to disappear.
2070 if '_phasecache' in vars(self):
2070 if '_phasecache' in vars(self):
2071 self._phasecache.write()
2071 self._phasecache.write()
2072
2072
2073 @unfilteredmethod
2073 @unfilteredmethod
2074 def destroyed(self):
2074 def destroyed(self):
2075 '''Inform the repository that nodes have been destroyed.
2075 '''Inform the repository that nodes have been destroyed.
2076 Intended for use by strip and rollback, so there's a common
2076 Intended for use by strip and rollback, so there's a common
2077 place for anything that has to be done after destroying history.
2077 place for anything that has to be done after destroying history.
2078 '''
2078 '''
2079 # When one tries to:
2079 # When one tries to:
2080 # 1) destroy nodes thus calling this method (e.g. strip)
2080 # 1) destroy nodes thus calling this method (e.g. strip)
2081 # 2) use phasecache somewhere (e.g. commit)
2081 # 2) use phasecache somewhere (e.g. commit)
2082 #
2082 #
2083 # then 2) will fail because the phasecache contains nodes that were
2083 # then 2) will fail because the phasecache contains nodes that were
2084 # removed. We can either remove phasecache from the filecache,
2084 # removed. We can either remove phasecache from the filecache,
2085 # causing it to reload next time it is accessed, or simply filter
2085 # causing it to reload next time it is accessed, or simply filter
2086 # the removed nodes now and write the updated cache.
2086 # the removed nodes now and write the updated cache.
2087 self._phasecache.filterunknown(self)
2087 self._phasecache.filterunknown(self)
2088 self._phasecache.write()
2088 self._phasecache.write()
2089
2089
2090 # refresh all repository caches
2090 # refresh all repository caches
2091 self.updatecaches()
2091 self.updatecaches()
2092
2092
2093 # Ensure the persistent tag cache is updated. Doing it now
2093 # Ensure the persistent tag cache is updated. Doing it now
2094 # means that the tag cache only has to worry about destroyed
2094 # means that the tag cache only has to worry about destroyed
2095 # heads immediately after a strip/rollback. That in turn
2095 # heads immediately after a strip/rollback. That in turn
2096 # guarantees that "cachetip == currenttip" (comparing both rev
2096 # guarantees that "cachetip == currenttip" (comparing both rev
2097 # and node) always means no nodes have been added or destroyed.
2097 # and node) always means no nodes have been added or destroyed.
2098
2098
2099 # XXX this is suboptimal when qrefresh'ing: we strip the current
2099 # XXX this is suboptimal when qrefresh'ing: we strip the current
2100 # head, refresh the tag cache, then immediately add a new head.
2100 # head, refresh the tag cache, then immediately add a new head.
2101 # But I think doing it this way is necessary for the "instant
2101 # But I think doing it this way is necessary for the "instant
2102 # tag cache retrieval" case to work.
2102 # tag cache retrieval" case to work.
2103 self.invalidate()
2103 self.invalidate()
2104
2104
2105 def status(self, node1='.', node2=None, match=None,
2105 def status(self, node1='.', node2=None, match=None,
2106 ignored=False, clean=False, unknown=False,
2106 ignored=False, clean=False, unknown=False,
2107 listsubrepos=False):
2107 listsubrepos=False):
2108 '''a convenience method that calls node1.status(node2)'''
2108 '''a convenience method that calls node1.status(node2)'''
2109 return self[node1].status(node2, match, ignored, clean, unknown,
2109 return self[node1].status(node2, match, ignored, clean, unknown,
2110 listsubrepos)
2110 listsubrepos)
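# Illustrative sketch (not part of this module): comparing the working
# directory to its first parent and printing the modified files.
def _printmodified(repo):
    st = repo.status()
    for f in st.modified:
        repo.ui.write('M %s\n' % f)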
2111
2111
2112 def addpostdsstatus(self, ps):
2112 def addpostdsstatus(self, ps):
2113 """Add a callback to run within the wlock, at the point at which status
2113 """Add a callback to run within the wlock, at the point at which status
2114 fixups happen.
2114 fixups happen.
2115
2115
2116 On status completion, callback(wctx, status) will be called with the
2116 On status completion, callback(wctx, status) will be called with the
2117 wlock held, unless the dirstate has changed from underneath or the wlock
2117 wlock held, unless the dirstate has changed from underneath or the wlock
2118 couldn't be grabbed.
2118 couldn't be grabbed.
2119
2119
2120 Callbacks should not capture and use a cached copy of the dirstate --
2120 Callbacks should not capture and use a cached copy of the dirstate --
2121 it might change in the meanwhile. Instead, they should access the
2121 it might change in the meanwhile. Instead, they should access the
2122 dirstate via wctx.repo().dirstate.
2122 dirstate via wctx.repo().dirstate.
2123
2123
2124 This list is emptied out after each status run -- extensions should
2125 make sure they add to this list each time dirstate.status is called.
2126 Extensions should also make sure they don't call this for statuses
2126 Extensions should also make sure they don't call this for statuses
2127 that don't involve the dirstate.
2127 that don't involve the dirstate.
2128 """
2128 """
2129
2129
2130 # The list is located here for uniqueness reasons -- it is actually
2130 # The list is located here for uniqueness reasons -- it is actually
2131 # managed by the workingctx, but that isn't unique per-repo.
2131 # managed by the workingctx, but that isn't unique per-repo.
2132 self._postdsstatus.append(ps)
2132 self._postdsstatus.append(ps)
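# Illustrative sketch (not part of this module): a hypothetical extension
# registering a post-dirstate-status callback with the documented
# (wctx, status) signature.
def _logdirtycount(repo):
    def fixup(wctx, status):
        wctx.repo().ui.debug('%d files modified\n' % len(status.modified))
    repo.addpostdsstatus(fixup)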
2133
2133
2134 def postdsstatus(self):
2134 def postdsstatus(self):
2135 """Used by workingctx to get the list of post-dirstate-status hooks."""
2135 """Used by workingctx to get the list of post-dirstate-status hooks."""
2136 return self._postdsstatus
2136 return self._postdsstatus
2137
2137
2138 def clearpostdsstatus(self):
2138 def clearpostdsstatus(self):
2139 """Used by workingctx to clear post-dirstate-status hooks."""
2139 """Used by workingctx to clear post-dirstate-status hooks."""
2140 del self._postdsstatus[:]
2140 del self._postdsstatus[:]
2141
2141
2142 def heads(self, start=None):
2142 def heads(self, start=None):
2143 if start is None:
2143 if start is None:
2144 cl = self.changelog
2144 cl = self.changelog
2145 headrevs = reversed(cl.headrevs())
2145 headrevs = reversed(cl.headrevs())
2146 return [cl.node(rev) for rev in headrevs]
2146 return [cl.node(rev) for rev in headrevs]
2147
2147
2148 heads = self.changelog.heads(start)
2148 heads = self.changelog.heads(start)
2149 # sort the output in rev descending order
2149 # sort the output in rev descending order
2150 return sorted(heads, key=self.changelog.rev, reverse=True)
2150 return sorted(heads, key=self.changelog.rev, reverse=True)
2151
2151
2152 def branchheads(self, branch=None, start=None, closed=False):
2152 def branchheads(self, branch=None, start=None, closed=False):
2153 '''return a (possibly filtered) list of heads for the given branch
2153 '''return a (possibly filtered) list of heads for the given branch
2154
2154
2155 Heads are returned in topological order, from newest to oldest.
2155 Heads are returned in topological order, from newest to oldest.
2156 If branch is None, use the dirstate branch.
2156 If branch is None, use the dirstate branch.
2157 If start is not None, return only heads reachable from start.
2157 If start is not None, return only heads reachable from start.
2158 If closed is True, return heads that are marked as closed as well.
2158 If closed is True, return heads that are marked as closed as well.
2159 '''
2159 '''
2160 if branch is None:
2160 if branch is None:
2161 branch = self[None].branch()
2161 branch = self[None].branch()
2162 branches = self.branchmap()
2162 branches = self.branchmap()
2163 if branch not in branches:
2163 if branch not in branches:
2164 return []
2164 return []
2165 # the cache returns heads ordered lowest to highest
2165 # the cache returns heads ordered lowest to highest
2166 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2166 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2167 if start is not None:
2167 if start is not None:
2168 # filter out the heads that cannot be reached from startrev
2168 # filter out the heads that cannot be reached from startrev
2169 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2169 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2170 bheads = [h for h in bheads if h in fbheads]
2170 bheads = [h for h in bheads if h in fbheads]
2171 return bheads
2171 return bheads
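# Illustrative sketch (not part of this module): listing the open and
# closed heads of the 'default' branch as short hashes.
def _defaultheads(repo):
    return [short(n) for n in repo.branchheads('default', closed=True)]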
2172
2172
2173 def branches(self, nodes):
2173 def branches(self, nodes):
2174 if not nodes:
2174 if not nodes:
2175 nodes = [self.changelog.tip()]
2175 nodes = [self.changelog.tip()]
2176 b = []
2176 b = []
2177 for n in nodes:
2177 for n in nodes:
2178 t = n
2178 t = n
2179 while True:
2179 while True:
2180 p = self.changelog.parents(n)
2180 p = self.changelog.parents(n)
2181 if p[1] != nullid or p[0] == nullid:
2181 if p[1] != nullid or p[0] == nullid:
2182 b.append((t, n, p[0], p[1]))
2182 b.append((t, n, p[0], p[1]))
2183 break
2183 break
2184 n = p[0]
2184 n = p[0]
2185 return b
2185 return b
2186
2186
2187 def between(self, pairs):
2187 def between(self, pairs):
2188 r = []
2188 r = []
2189
2189
2190 for top, bottom in pairs:
2190 for top, bottom in pairs:
2191 n, l, i = top, [], 0
2191 n, l, i = top, [], 0
2192 f = 1
2192 f = 1
2193
2193
2194 while n != bottom and n != nullid:
2194 while n != bottom and n != nullid:
2195 p = self.changelog.parents(n)[0]
2195 p = self.changelog.parents(n)[0]
2196 if i == f:
2196 if i == f:
2197 l.append(n)
2197 l.append(n)
2198 f = f * 2
2198 f = f * 2
2199 n = p
2199 n = p
2200 i += 1
2200 i += 1
2201
2201
2202 r.append(l)
2202 r.append(l)
2203
2203
2204 return r
2204 return r
2205
2205
2206 def checkpush(self, pushop):
2206 def checkpush(self, pushop):
2207 """Extensions can override this function if additional checks have
2207 """Extensions can override this function if additional checks have
2208 to be performed before pushing, or call it if they override push
2208 to be performed before pushing, or call it if they override push
2209 command.
2209 command.
2210 """
2210 """
2211
2211
2212 @unfilteredpropertycache
2212 @unfilteredpropertycache
2213 def prepushoutgoinghooks(self):
2213 def prepushoutgoinghooks(self):
2214 """Return util.hooks consists of a pushop with repo, remote, outgoing
2214 """Return util.hooks consists of a pushop with repo, remote, outgoing
2215 methods, which are called before pushing changesets.
2215 methods, which are called before pushing changesets.
2216 """
2216 """
2217 return util.hooks()
2217 return util.hooks()
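# Illustrative sketch (not part of this module): registering a hook that
# runs with the pushop before outgoing changesets are sent; the size
# check is hypothetical.
def _registerpushcheck(repo):
    def checkoutgoing(pushop):
        if len(pushop.outgoing.missing) > 100:
            pushop.repo.ui.warn('pushing more than 100 changesets\n')
    repo.prepushoutgoinghooks.add('myext', checkoutgoing)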
2218
2218
2219 def pushkey(self, namespace, key, old, new):
2219 def pushkey(self, namespace, key, old, new):
2220 try:
2220 try:
2221 tr = self.currenttransaction()
2221 tr = self.currenttransaction()
2222 hookargs = {}
2222 hookargs = {}
2223 if tr is not None:
2223 if tr is not None:
2224 hookargs.update(tr.hookargs)
2224 hookargs.update(tr.hookargs)
2225 hookargs = pycompat.strkwargs(hookargs)
2225 hookargs = pycompat.strkwargs(hookargs)
2226 hookargs[r'namespace'] = namespace
2226 hookargs[r'namespace'] = namespace
2227 hookargs[r'key'] = key
2227 hookargs[r'key'] = key
2228 hookargs[r'old'] = old
2228 hookargs[r'old'] = old
2229 hookargs[r'new'] = new
2229 hookargs[r'new'] = new
2230 self.hook('prepushkey', throw=True, **hookargs)
2230 self.hook('prepushkey', throw=True, **hookargs)
2231 except error.HookAbort as exc:
2231 except error.HookAbort as exc:
2232 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2232 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2233 if exc.hint:
2233 if exc.hint:
2234 self.ui.write_err(_("(%s)\n") % exc.hint)
2234 self.ui.write_err(_("(%s)\n") % exc.hint)
2235 return False
2235 return False
2236 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2236 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2237 ret = pushkey.push(self, namespace, key, old, new)
2237 ret = pushkey.push(self, namespace, key, old, new)
2238 def runhook():
2238 def runhook():
2239 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2239 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2240 ret=ret)
2240 ret=ret)
2241 self._afterlock(runhook)
2241 self._afterlock(runhook)
2242 return ret
2242 return ret
2243
2243
2244 def listkeys(self, namespace):
2244 def listkeys(self, namespace):
2245 self.hook('prelistkeys', throw=True, namespace=namespace)
2245 self.hook('prelistkeys', throw=True, namespace=namespace)
2246 self.ui.debug('listing keys for "%s"\n' % namespace)
2246 self.ui.debug('listing keys for "%s"\n' % namespace)
2247 values = pushkey.list(self, namespace)
2247 values = pushkey.list(self, namespace)
2248 self.hook('listkeys', namespace=namespace, values=values)
2248 self.hook('listkeys', namespace=namespace, values=values)
2249 return values
2249 return values
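# Illustrative sketch (not part of this module): the pushkey protocol
# exposes namespaces such as 'bookmarks' and 'phases'; listkeys()
# returns a dict mapping keys to values within a namespace.
def _printbookmarks(repo):
    for name, hexnode in sorted(repo.listkeys('bookmarks').items()):
        repo.ui.write('%s %s\n' % (name, hexnode))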
2250
2250
2251 def debugwireargs(self, one, two, three=None, four=None, five=None):
2251 def debugwireargs(self, one, two, three=None, four=None, five=None):
2252 '''used to test argument passing over the wire'''
2252 '''used to test argument passing over the wire'''
2253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2254 pycompat.bytestr(four),
2254 pycompat.bytestr(four),
2255 pycompat.bytestr(five))
2255 pycompat.bytestr(five))
2256
2256
2257 def savecommitmessage(self, text):
2257 def savecommitmessage(self, text):
2258 fp = self.vfs('last-message.txt', 'wb')
2258 fp = self.vfs('last-message.txt', 'wb')
2259 try:
2259 try:
2260 fp.write(text)
2260 fp.write(text)
2261 finally:
2261 finally:
2262 fp.close()
2262 fp.close()
2263 return self.pathto(fp.name[len(self.root) + 1:])
2263 return self.pathto(fp.name[len(self.root) + 1:])
2264
2264
2265 # used to avoid circular references so destructors work
2265 # used to avoid circular references so destructors work
2266 def aftertrans(files):
2266 def aftertrans(files):
2267 renamefiles = [tuple(t) for t in files]
2267 renamefiles = [tuple(t) for t in files]
2268 def a():
2268 def a():
2269 for vfs, src, dest in renamefiles:
2269 for vfs, src, dest in renamefiles:
2270 # if src and dest refer to the same file, vfs.rename is a no-op,
2271 # leaving both src and dest on disk. Delete dest to make sure
2272 # the rename cannot be such a no-op.
2273 vfs.tryunlink(dest)
2273 vfs.tryunlink(dest)
2274 try:
2274 try:
2275 vfs.rename(src, dest)
2275 vfs.rename(src, dest)
2276 except OSError: # journal file does not yet exist
2276 except OSError: # journal file does not yet exist
2277 pass
2277 pass
2278 return a
2278 return a
2279
2279
2280 def undoname(fn):
2280 def undoname(fn):
2281 base, name = os.path.split(fn)
2281 base, name = os.path.split(fn)
2282 assert name.startswith('journal')
2282 assert name.startswith('journal')
2283 return os.path.join(base, name.replace('journal', 'undo', 1))
2283 return os.path.join(base, name.replace('journal', 'undo', 1))
2284
2284
2285 def instance(ui, path, create):
2285 def instance(ui, path, create):
2286 return localrepository(ui, util.urllocalpath(path), create)
2286 return localrepository(ui, util.urllocalpath(path), create)
2287
2287
2288 def islocal(path):
2288 def islocal(path):
2289 return True
2289 return True
2290
2290
2291 def newreporequirements(repo):
2291 def newreporequirements(repo):
2292 """Determine the set of requirements for a new local repository.
2292 """Determine the set of requirements for a new local repository.
2293
2293
2294 Extensions can wrap this function to specify custom requirements for
2294 Extensions can wrap this function to specify custom requirements for
2295 new repositories.
2295 new repositories.
2296 """
2296 """
2297 ui = repo.ui
2297 ui = repo.ui
2298 requirements = {'revlogv1'}
2298 requirements = {'revlogv1'}
2299 if ui.configbool('format', 'usestore'):
2299 if ui.configbool('format', 'usestore'):
2300 requirements.add('store')
2300 requirements.add('store')
2301 if ui.configbool('format', 'usefncache'):
2301 if ui.configbool('format', 'usefncache'):
2302 requirements.add('fncache')
2302 requirements.add('fncache')
2303 if ui.configbool('format', 'dotencode'):
2303 if ui.configbool('format', 'dotencode'):
2304 requirements.add('dotencode')
2304 requirements.add('dotencode')
2305
2305
2306 compengine = ui.config('experimental', 'format.compression')
2306 compengine = ui.config('experimental', 'format.compression')
2307 if compengine not in util.compengines:
2307 if compengine not in util.compengines:
2308 raise error.Abort(_('compression engine %s defined by '
2308 raise error.Abort(_('compression engine %s defined by '
2309 'experimental.format.compression not available') %
2309 'experimental.format.compression not available') %
2310 compengine,
2310 compengine,
2311 hint=_('run "hg debuginstall" to list available '
2311 hint=_('run "hg debuginstall" to list available '
2312 'compression engines'))
2312 'compression engines'))
2313
2313
2314 # zlib is the historical default and doesn't need an explicit requirement.
2314 # zlib is the historical default and doesn't need an explicit requirement.
2315 if compengine != 'zlib':
2315 if compengine != 'zlib':
2316 requirements.add('exp-compression-%s' % compengine)
2316 requirements.add('exp-compression-%s' % compengine)
2317
2317
2318 if scmutil.gdinitconfig(ui):
2318 if scmutil.gdinitconfig(ui):
2319 requirements.add('generaldelta')
2319 requirements.add('generaldelta')
2320 if ui.configbool('experimental', 'treemanifest'):
2320 if ui.configbool('experimental', 'treemanifest'):
2321 requirements.add('treemanifest')
2321 requirements.add('treemanifest')
2322
2322
2323 revlogv2 = ui.config('experimental', 'revlogv2')
2323 revlogv2 = ui.config('experimental', 'revlogv2')
2324 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2324 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2325 requirements.remove('revlogv1')
2325 requirements.remove('revlogv1')
2326 # generaldelta is implied by revlogv2.
2326 # generaldelta is implied by revlogv2.
2327 requirements.discard('generaldelta')
2327 requirements.discard('generaldelta')
2328 requirements.add(REVLOGV2_REQUIREMENT)
2328 requirements.add(REVLOGV2_REQUIREMENT)
2329
2329
2330 return requirements
2330 return requirements
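# Illustrative sketch (not part of this module): an extension can wrap
# newreporequirements() to add its own requirement string so that older
# clients refuse to touch repositories using the new format; the names
# below are hypothetical.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         if repo.ui.configbool('myext', 'use-fancy-format'):
#             reqs.add('exp-myext-fancy-format')
#         return reqs
#
#     extensions.wrapfunction(localrepo, 'newreporequirements',
#                             _newreporequirements)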