auditor: add simple comment about repo.auditor and al...
marmoute
r33254:4ea0b7a6 default
@@ -1,2111 +1,2113 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
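
# Illustrative usage of the two decorators above (mirrors real uses further
# down in this module), e.g.:
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
#
# The cached property is recomputed when the named files change on disk and,
# via _basefilecache, always lives on the unfiltered repo.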

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
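
# Illustrative usage: the decorated method transparently runs against the
# unfiltered repo (revbranchcache() later in this file is a real example):
#
#     @unfilteredmethod
#     def revbranchcache(self):
#         ...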

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps, **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
+        # These auditors are not used by the vfs; the only known user at the
+        # time of writing is basectx.match.
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
408 def close(self):
410 def close(self):
409 self._writecaches()
411 self._writecaches()
410
412
411 def _loadextensions(self):
413 def _loadextensions(self):
412 extensions.loadall(self.ui)
414 extensions.loadall(self.ui)
413
415
414 def _writecaches(self):
416 def _writecaches(self):
415 if self._revbranchcache:
417 if self._revbranchcache:
416 self._revbranchcache.write()
418 self._revbranchcache.write()
417
419
418 def _restrictcapabilities(self, caps):
420 def _restrictcapabilities(self, caps):
419 if self.ui.configbool('experimental', 'bundle2-advertise', True):
421 if self.ui.configbool('experimental', 'bundle2-advertise', True):
420 caps = set(caps)
422 caps = set(caps)
421 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
423 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
422 caps.add('bundle2=' + urlreq.quote(capsblob))
424 caps.add('bundle2=' + urlreq.quote(capsblob))
423 return caps
425 return caps
424
426
425 def _applyopenerreqs(self):
427 def _applyopenerreqs(self):
426 self.svfs.options = dict((r, 1) for r in self.requirements
428 self.svfs.options = dict((r, 1) for r in self.requirements
427 if r in self.openerreqs)
429 if r in self.openerreqs)
428 # experimental config: format.chunkcachesize
430 # experimental config: format.chunkcachesize
429 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
431 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
430 if chunkcachesize is not None:
432 if chunkcachesize is not None:
431 self.svfs.options['chunkcachesize'] = chunkcachesize
433 self.svfs.options['chunkcachesize'] = chunkcachesize
432 # experimental config: format.maxchainlen
434 # experimental config: format.maxchainlen
433 maxchainlen = self.ui.configint('format', 'maxchainlen')
435 maxchainlen = self.ui.configint('format', 'maxchainlen')
434 if maxchainlen is not None:
436 if maxchainlen is not None:
435 self.svfs.options['maxchainlen'] = maxchainlen
437 self.svfs.options['maxchainlen'] = maxchainlen
436 # experimental config: format.manifestcachesize
438 # experimental config: format.manifestcachesize
437 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
439 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
438 if manifestcachesize is not None:
440 if manifestcachesize is not None:
439 self.svfs.options['manifestcachesize'] = manifestcachesize
441 self.svfs.options['manifestcachesize'] = manifestcachesize
440 # experimental config: format.aggressivemergedeltas
442 # experimental config: format.aggressivemergedeltas
441 aggressivemergedeltas = self.ui.configbool('format',
443 aggressivemergedeltas = self.ui.configbool('format',
442 'aggressivemergedeltas')
444 'aggressivemergedeltas')
443 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
445 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
444 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
446 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
445 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
447 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
446 if 0 <= chainspan:
448 if 0 <= chainspan:
447 self.svfs.options['maxdeltachainspan'] = chainspan
449 self.svfs.options['maxdeltachainspan'] = chainspan
448
450
449 for r in self.requirements:
451 for r in self.requirements:
450 if r.startswith('exp-compression-'):
452 if r.startswith('exp-compression-'):
451 self.svfs.options['compengine'] = r[len('exp-compression-'):]
453 self.svfs.options['compengine'] = r[len('exp-compression-'):]
452
454
453 # TODO move "revlogv2" to openerreqs once finalized.
455 # TODO move "revlogv2" to openerreqs once finalized.
454 if REVLOGV2_REQUIREMENT in self.requirements:
456 if REVLOGV2_REQUIREMENT in self.requirements:
455 self.svfs.options['revlogv2'] = True
457 self.svfs.options['revlogv2'] = True
456
458
457 def _writerequirements(self):
459 def _writerequirements(self):
458 scmutil.writerequires(self.vfs, self.requirements)
460 scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
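
    # Illustrative usage (filter names come from repoview and appear
    # elsewhere in this module): repo.filtered('served'), used by localpeer
    # above, hides changesets that should not be served to peers, and
    # repo.filtered('visible') hides hidden (e.g. obsolete) changesets.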

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
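
    # Illustrative indexing semantics, per __getitem__ above:
    #   repo[None]  -> workingctx for the working directory
    #   repo[rev], repo[node], repo['tip'] -> changectx
    #   repo[0:3]   -> list of changectx, skipping filtered revisions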

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)
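
    # Container semantics, per the methods above: ``changeid in repo``
    # checks existence, ``len(repo)`` is the number of changesets, and
    # ``for rev in repo`` iterates integer revision numbers.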

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
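
    # Illustrative call (variable names are hypothetical); %d and %s are
    # revsetlang.formatspec escapes for an int revision and a string:
    #
    #     for rev in repo.revs('ancestors(%d) and branch(%s)', tiprev, name):
    #         ...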

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
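
    # Illustrative call: same spec language as revs(), but yielding
    # contexts, e.g. ``for ctx in repo.set('head() and branch(%s)', b)``.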

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
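
    # Illustrative call (the hook name is a standard Mercurial hook; the
    # surrounding values are hypothetical):
    #
    #     repo.hook('pretxncommit', throw=True, node=hex(node))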

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
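
    # Illustrative usage, per the docstring above:
    # repo.branchmap()['default'] is the list of head nodes on the
    # 'default' branch, ordered by increasing revision number.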

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, since it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
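
    # Illustrative hgrc configuration read by _loadfilter (the commands are
    # hypothetical; a pattern maps to a filter command, and '!' disables an
    # entry):
    #
    #     [encode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #     [decode]
    #     **.txt = dos2unix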
901
903
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
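        # txnid is effectively unique per transaction; it is exposed to
        # hooks (HG_TXNID in the shell environment) so that hook runs
        # belonging to the same transaction can be correlated.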
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track this movement from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here; as we only do it once,
                # building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the
            # 'tr.hookargs' dict is copied before those run. In addition,
            # we need the data available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at transaction
                # close if tr.addfilegenerator (via dirstate.write or
                # so) isn't invoked while the transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the
        # file is outdated when running hooks. As the fncache is used for
        # streaming clone, this is not expected to break anything that
        # happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

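    # When a transaction closes successfully, aftertrans() renames each
    # journal file above to its 'undo' counterpart (see undoname);
    # 'hg rollback' later restores state from those undo files.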
    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
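            # undo.desc, as written by _writejournal, looks like
            # (an illustrative example):
            #
            #   42
            #   commit
            #
            # with an optional third line carrying extra detail.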
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to
        augment this logic. For this purpose, the created transaction is
        passed to the method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be
        used to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
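        # An illustrative caller pattern (locks act as context managers):
        # take wlock before lock when both are needed, e.g.
        #
        #   with repo.wlock():
        #       with repo.lock():
        #           with repo.transaction('my-operation'):
        #               ...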
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # an acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
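                # meta now records the rename source, e.g.
                # (an illustrative value):
                #   {'copy': 'foo', 'copyrev': '1f0dee641bb7...'}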
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

1572 @unfilteredmethod
1574 @unfilteredmethod
1573 def commit(self, text="", user=None, date=None, match=None, force=False,
1575 def commit(self, text="", user=None, date=None, match=None, force=False,
1574 editor=False, extra=None):
1576 editor=False, extra=None):
1575 """Add a new revision to current repository.
1577 """Add a new revision to current repository.
1576
1578
1577 Revision information is gathered from the working directory,
1579 Revision information is gathered from the working directory,
1578 match can be used to filter the committed files. If editor is
1580 match can be used to filter the committed files. If editor is
1579 supplied, it is called to get a commit message.
1581 supplied, it is called to get a commit message.
1580 """
1582 """
1581 if extra is None:
1583 if extra is None:
1582 extra = {}
1584 extra = {}
1583
1585
1584 def fail(f, msg):
1586 def fail(f, msg):
1585 raise error.Abort('%s: %s' % (f, msg))
1587 raise error.Abort('%s: %s' % (f, msg))
1586
1588
1587 if not match:
1589 if not match:
1588 match = matchmod.always(self.root, '')
1590 match = matchmod.always(self.root, '')
1589
1591
1590 if not force:
1592 if not force:
1591 vdirs = []
1593 vdirs = []
1592 match.explicitdir = vdirs.append
1594 match.explicitdir = vdirs.append
1593 match.bad = fail
1595 match.bad = fail
1594
1596
1595 wlock = lock = tr = None
1597 wlock = lock = tr = None
1596 try:
1598 try:
1597 wlock = self.wlock()
1599 wlock = self.wlock()
1598 lock = self.lock() # for recent changelog (see issue4368)
1600 lock = self.lock() # for recent changelog (see issue4368)
1599
1601
1600 wctx = self[None]
1602 wctx = self[None]
1601 merge = len(wctx.parents()) > 1
1603 merge = len(wctx.parents()) > 1
1602
1604
1603 if not force and merge and not match.always():
1605 if not force and merge and not match.always():
1604 raise error.Abort(_('cannot partially commit a merge '
1606 raise error.Abort(_('cannot partially commit a merge '
1605 '(do not specify files or patterns)'))
1607 '(do not specify files or patterns)'))
1606
1608
1607 status = self.status(match=match, clean=force)
1609 status = self.status(match=match, clean=force)
1608 if force:
1610 if force:
1609 status.modified.extend(status.clean) # mq may commit clean files
1611 status.modified.extend(status.clean) # mq may commit clean files
1610
1612
1611 # check subrepos
1613 # check subrepos
1612 subs = []
1614 subs = []
1613 commitsubs = set()
1615 commitsubs = set()
1614 newstate = wctx.substate.copy()
1616 newstate = wctx.substate.copy()
1615 # only manage subrepos and .hgsubstate if .hgsub is present
1617 # only manage subrepos and .hgsubstate if .hgsub is present
1616 if '.hgsub' in wctx:
1618 if '.hgsub' in wctx:
1617 # we'll decide whether to track this ourselves, thanks
1619 # we'll decide whether to track this ourselves, thanks
1618 for c in status.modified, status.added, status.removed:
1620 for c in status.modified, status.added, status.removed:
1619 if '.hgsubstate' in c:
1621 if '.hgsubstate' in c:
1620 c.remove('.hgsubstate')
1622 c.remove('.hgsubstate')
1621
1623
1622 # compare current state to last committed state
1624 # compare current state to last committed state
1623 # build new substate based on last committed state
1625 # build new substate based on last committed state
1624 oldstate = wctx.p1().substate
1626 oldstate = wctx.p1().substate
1625 for s in sorted(newstate.keys()):
1627 for s in sorted(newstate.keys()):
1626 if not match(s):
1628 if not match(s):
1627 # ignore working copy, use old state if present
1629 # ignore working copy, use old state if present
1628 if s in oldstate:
1630 if s in oldstate:
1629 newstate[s] = oldstate[s]
1631 newstate[s] = oldstate[s]
1630 continue
1632 continue
1631 if not force:
1633 if not force:
1632 raise error.Abort(
1634 raise error.Abort(
1633 _("commit with new subrepo %s excluded") % s)
1635 _("commit with new subrepo %s excluded") % s)
1634 dirtyreason = wctx.sub(s).dirtyreason(True)
1636 dirtyreason = wctx.sub(s).dirtyreason(True)
1635 if dirtyreason:
1637 if dirtyreason:
1636 if not self.ui.configbool('ui', 'commitsubrepos'):
1638 if not self.ui.configbool('ui', 'commitsubrepos'):
1637 raise error.Abort(dirtyreason,
1639 raise error.Abort(dirtyreason,
1638 hint=_("use --subrepos for recursive commit"))
1640 hint=_("use --subrepos for recursive commit"))
1639 subs.append(s)
1641 subs.append(s)
1640 commitsubs.add(s)
1642 commitsubs.add(s)
1641 else:
1643 else:
1642 bs = wctx.sub(s).basestate()
1644 bs = wctx.sub(s).basestate()
1643 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1645 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1644 if oldstate.get(s, (None, None, None))[1] != bs:
1646 if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

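    # Example (a hedged sketch, not part of the original file): the precommit
    # and pretxncommit hooks fired above are the usual way to veto a commit.
    # An in-process hook could look like this (module and hook names are
    # illustrative), wired up in hgrc as
    # "pretxncommit.checkmsg = python:mymod.checkmsg":
    #
    #   def checkmsg(ui, repo, hooktype, node=None, **kwargs):
    #       # a truthy return value aborts the transaction
    #       if 'issue' not in repo[node].description():
    #           ui.warn('commit message must reference an issue\n')
    #           return True
    #       return False
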
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

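    # Example (a hedged sketch; memctx/memfilectx signatures have varied
    # across Mercurial versions): commitctx() is the primitive behind
    # in-memory commits, e.g. creating a one-file changeset without touching
    # the working directory:
    #
    #   from mercurial import context
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'some content\n')
    #
    #   ctx = context.memctx(repo, (repo['.'].node(), None), 'commit text',
    #                        ['a.txt'], getfilectx, user='me <me@example.com>')
    #   node = repo.commitctx(ctx)
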
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

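    # Illustrative replacement for the deprecated form above (the pattern is
    # made up):
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo['tip'].walk(m):
    #       repo.ui.write(f + '\n')
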
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

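    # Example (a hedged sketch of the contract described above; 'record' is a
    # hypothetical helper): since the list is cleared after every status run,
    # an extension re-registers its callback each time it wants one:
    #
    #   def fixup(wctx, status):
    #       # called under wlock, with a fresh dirstate via wctx.repo()
    #       for f in status.modified:
    #           record(f)
    #
    #   repo.addpostdsstatus(fixup)
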
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

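    # Usage sketch (illustrative): per the docstring above, both calls return
    # heads newest first:
    #
    #   heads = repo.branchheads('default')                  # open heads only
    #   allheads = repo.branchheads('default', closed=True)  # closed too
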
    def branches(self, nodes):
        # for each node, follow first parents until a merge or a root is
        # reached, recording a (start, stop, p1-of-stop, p2-of-stop) tuple
        # for that linear run
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, walk the first-parent chain from top
        # towards bottom, sampling nodes at exponentially growing distances
        # (1, 2, 4, ...) from top
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

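    # Example (a hedged sketch; 'myextrepo' is illustrative): an extension
    # typically overrides checkpush() by subclassing the repo from
    # reposetup():
    #
    #   def reposetup(ui, repo):
    #       class myextrepo(repo.__class__):
    #           def checkpush(self, pushop):
    #               super(myextrepo, self).checkpush(pushop)
    #               if pushop.force:
    #                   raise error.Abort('forced pushes are disabled here')
    #       repo.__class__ = myextrepo
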
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions called before pushing
        changesets; each hook is invoked with a pushop that carries repo,
        remote and outgoing attributes.
        """
        return util.hooks()

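    # Example (illustrative; 'myext' names the hook source): each registered
    # function receives the pushop:
    #
    #   def checkoutgoing(pushop):
    #       if len(pushop.outgoing.missing) > 100:
    #           pushop.repo.ui.warn('pushing more than 100 changesets\n')
    #
    #   repo.prepushoutgoinghooks.add('myext', checkoutgoing)
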
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

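    # Usage sketch (illustrative; 'mybook' is made up): bookmarks travel over
    # this pushkey layer, so moving one by hand can be spelled as:
    #
    #   marks = repo.listkeys('bookmarks')   # {name: hex node}
    #   repo.pushkey('bookmarks', 'mybook', marks.get('mybook', ''),
    #                repo['tip'].hex())
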
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    # e.g. '.hg/store/journal' -> '.hg/store/undo'
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
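
# Example (a hedged sketch; 'exp-myfeature' and the config knob are
# illustrative): an extension adding its own on-disk requirement would
# typically wrap this function:
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled'):
#           reqs.add('exp-myfeature')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)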