py3: make localrepo filtered repo cache work on py3...
Martin von Zweigbergk
r33403:1bb209d0 default
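Why this one-character change works (a minimal sketch, not Mercurial code, showing the underlying Python behavior): on Python 3, ``type()`` requires its name argument to be a native ``str``. Mercurial's custom module loader rewrites unprefixed string literals in its source to bytes literals for Python 3, so ``'%sfilteredrepo'`` reaches ``type()`` as ``bytes`` and raises ``TypeError``; an ``r''``-prefixed literal is left alone by that transformer and stays a native ``str`` on both Python 2 and 3.

    # Minimal sketch: type() on Python 3 insists on a native str for
    # the class name. The names below are illustrative only.
    bases = (object,)

    cls = type('visiblefilteredrepo', bases, {})  # str name: fine everywhere
    print(cls.__name__)

    try:
        # A bytes name, as hg's py3 source transformer would produce
        # from an unprefixed literal:
        type(b'visiblefilteredrepo', bases, {})
    except TypeError as exc:
        print('Python 3 rejects bytes names: %s' % exc)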
@@ -1,2166 +1,2166 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps, **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs; at the time of writing,
        # the only user is basectx.match.
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
-           cls = type('%sfilteredrepo' % name, bases, {})
+           cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but
    # that can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
777
777
778 def tagtype(self, tagname):
778 def tagtype(self, tagname):
779 '''
779 '''
780 return the type of the given tag. result can be:
780 return the type of the given tag. result can be:
781
781
782 'local' : a local tag
782 'local' : a local tag
783 'global' : a global tag
783 'global' : a global tag
784 None : tag does not exist
784 None : tag does not exist
785 '''
785 '''
786
786
787 return self._tagscache.tagtypes.get(tagname)
787 return self._tagscache.tagtypes.get(tagname)
788
788
789 def tagslist(self):
789 def tagslist(self):
790 '''return a list of tags ordered by revision'''
790 '''return a list of tags ordered by revision'''
791 if not self._tagscache.tagslist:
791 if not self._tagscache.tagslist:
792 l = []
792 l = []
793 for t, n in self.tags().iteritems():
793 for t, n in self.tags().iteritems():
794 l.append((self.changelog.rev(n), t, n))
794 l.append((self.changelog.rev(n), t, n))
795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
796
796
797 return self._tagscache.tagslist
797 return self._tagscache.tagslist
798
798
799 def nodetags(self, node):
799 def nodetags(self, node):
800 '''return the tags associated with a node'''
800 '''return the tags associated with a node'''
801 if not self._tagscache.nodetagscache:
801 if not self._tagscache.nodetagscache:
802 nodetagscache = {}
802 nodetagscache = {}
803 for t, n in self._tagscache.tags.iteritems():
803 for t, n in self._tagscache.tags.iteritems():
804 nodetagscache.setdefault(n, []).append(t)
804 nodetagscache.setdefault(n, []).append(t)
805 for tags in nodetagscache.itervalues():
805 for tags in nodetagscache.itervalues():
806 tags.sort()
806 tags.sort()
807 self._tagscache.nodetagscache = nodetagscache
807 self._tagscache.nodetagscache = nodetagscache
808 return self._tagscache.nodetagscache.get(node, [])
808 return self._tagscache.nodetagscache.get(node, [])
809
809
810 def nodebookmarks(self, node):
810 def nodebookmarks(self, node):
811 """return the list of bookmarks pointing to the specified node"""
811 """return the list of bookmarks pointing to the specified node"""
812 marks = []
812 marks = []
813 for bookmark, n in self._bookmarks.iteritems():
813 for bookmark, n in self._bookmarks.iteritems():
814 if n == node:
814 if n == node:
815 marks.append(bookmark)
815 marks.append(bookmark)
816 return sorted(marks)
816 return sorted(marks)
817
817
818 def branchmap(self):
818 def branchmap(self):
819 '''returns a dictionary {branch: [branchheads]} with branchheads
819 '''returns a dictionary {branch: [branchheads]} with branchheads
820 ordered by increasing revision number'''
820 ordered by increasing revision number'''
821 branchmap.updatecache(self)
821 branchmap.updatecache(self)
822 return self._branchcaches[self.filtername]
822 return self._branchcaches[self.filtername]
823
823
824 @unfilteredmethod
824 @unfilteredmethod
825 def revbranchcache(self):
825 def revbranchcache(self):
826 if not self._revbranchcache:
826 if not self._revbranchcache:
827 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
827 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
828 return self._revbranchcache
828 return self._revbranchcache
829
829
830 def branchtip(self, branch, ignoremissing=False):
830 def branchtip(self, branch, ignoremissing=False):
831 '''return the tip node for a given branch
831 '''return the tip node for a given branch
832
832
833 If ignoremissing is True, then this method will not raise an error.
833 If ignoremissing is True, then this method will not raise an error.
834 This is helpful for callers that only expect None for a missing branch
834 This is helpful for callers that only expect None for a missing branch
835 (e.g. namespace).
835 (e.g. namespace).
836
836
837 '''
837 '''
838 try:
838 try:
839 return self.branchmap().branchtip(branch)
839 return self.branchmap().branchtip(branch)
840 except KeyError:
840 except KeyError:
841 if not ignoremissing:
841 if not ignoremissing:
842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
843 else:
843 else:
844 pass
844 pass
845
845
846 def lookup(self, key):
846 def lookup(self, key):
847 return self[key].node()
847 return self[key].node()
848
848
849 def lookupbranch(self, key, remote=None):
849 def lookupbranch(self, key, remote=None):
850 repo = remote or self
850 repo = remote or self
851 if key in repo.branchmap():
851 if key in repo.branchmap():
852 return key
852 return key
853
853
854 repo = (remote and remote.local()) and remote or self
854 repo = (remote and remote.local()) and remote or self
855 return repo[key].branch()
855 return repo[key].branch()
856
856
857 def known(self, nodes):
857 def known(self, nodes):
858 cl = self.changelog
858 cl = self.changelog
859 nm = cl.nodemap
859 nm = cl.nodemap
860 filtered = cl.filteredrevs
860 filtered = cl.filteredrevs
861 result = []
861 result = []
862 for n in nodes:
862 for n in nodes:
863 r = nm.get(n)
863 r = nm.get(n)
864 resp = not (r is None or r in filtered)
864 resp = not (r is None or r in filtered)
865 result.append(resp)
865 result.append(resp)
866 return result
866 return result
867
867
868 def local(self):
868 def local(self):
869 return self
869 return self
870
870
871 def publishing(self):
871 def publishing(self):
872 # it's safe (and desirable) to trust the publish flag unconditionally
872 # it's safe (and desirable) to trust the publish flag unconditionally
873 # so that we don't finalize changes shared between users via ssh or nfs
873 # so that we don't finalize changes shared between users via ssh or nfs
874 return self.ui.configbool('phases', 'publish', True, untrusted=True)
874 return self.ui.configbool('phases', 'publish', True, untrusted=True)
875
875
876 def cancopy(self):
876 def cancopy(self):
877 # so statichttprepo's override of local() works
877 # so statichttprepo's override of local() works
878 if not self.local():
878 if not self.local():
879 return False
879 return False
880 if not self.publishing():
880 if not self.publishing():
881 return True
881 return True
882 # if publishing we can't copy if there is filtered content
882 # if publishing we can't copy if there is filtered content
883 return not self.filtered('visible').changelog.filteredrevs
883 return not self.filtered('visible').changelog.filteredrevs
884
884
885 def shared(self):
885 def shared(self):
886 '''the type of shared repository (None if not shared)'''
886 '''the type of shared repository (None if not shared)'''
887 if self.sharedpath != self.path:
887 if self.sharedpath != self.path:
888 return 'store'
888 return 'store'
889 return None
889 return None
890
890
891 def wjoin(self, f, *insidef):
891 def wjoin(self, f, *insidef):
892 return self.vfs.reljoin(self.root, f, *insidef)
892 return self.vfs.reljoin(self.root, f, *insidef)
893
893
894 def file(self, f):
894 def file(self, f):
895 if f[0] == '/':
895 if f[0] == '/':
896 f = f[1:]
896 f = f[1:]
897 return filelog.filelog(self.svfs, f)
897 return filelog.filelog(self.svfs, f)
898
898
899 def changectx(self, changeid):
899 def changectx(self, changeid):
900 return self[changeid]
900 return self[changeid]
901
901
    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
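
    # The patterns come from an hgrc section named after the filter. A sketch
    # of a matching configuration (example only; 'cleverencode:' and
    # 'cleverdecode:' are data filters registered by the win32text extension,
    # and a plain value is treated as a shell command to pipe data through):
    #
    #   [encode]
    #   **.txt = cleverencode:
    #   [decode]
    #   **.txt = cleverdecode: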

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)
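
    # Flag handling sketch (hypothetical calls): 'l' writes a symlink whose
    # target is ``data``; 'x' additionally sets the executable bit.
    #
    #   repo.wwrite('run.sh', data, 'x')    # regular file, marked executable
    #   repo.wwrite('lnk', 'target', 'l')   # symlink pointing at 'target'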

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
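        # Shape of the identifier built above (illustrative value only; the
        # hex digest is 40 characters of SHA-1 over a random/time seed):
        #   txnid = 'TXN:a9c2e1...'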
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes with
        # performance impacts. The current code runs more often than needed and
        # does not use caches as much as it could. The current focus is on the
        # behavior of the feature, so we disable it by default. The flag will
        # be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #   ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be invoked explicitly here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
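
    # undoname() maps each journal file to its post-transaction counterpart,
    # so undofiles() corresponds to (illustrative paths):
    #   .hg/store/journal    -> .hg/store/undo
    #   .hg/journal.dirstate -> .hg/undo.dirstate
    #   ...and likewise for branch, desc, bookmarks and phaseroots.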

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1
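
            # Sketch of the 'undo.desc' layout parsed above (cf. the
            # "%d\n%s\n" written by _writejournal above): first line is the
            # old changelog length, second the transaction description, and
            # an optional third line carries extra detail. For example:
            #   42
            #   commit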

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
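
    # The 600-second fallback above is user-configurable; illustrative hgrc
    # snippet (example only, not part of the original source):
    #
    #   [ui]
    #   timeout = 60    # give up waiting for a lock after one minute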

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None
1733
1733
1734 if merge and cctx.deleted():
1734 if merge and cctx.deleted():
1735 raise error.Abort(_("cannot commit merge with missing files"))
1735 raise error.Abort(_("cannot commit merge with missing files"))
1736
1736
1737 ms = mergemod.mergestate.read(self)
1737 ms = mergemod.mergestate.read(self)
1738 mergeutil.checkunresolved(ms)
1738 mergeutil.checkunresolved(ms)
1739
1739
1740 if editor:
1740 if editor:
1741 cctx._text = editor(self, cctx, subs)
1741 cctx._text = editor(self, cctx, subs)
1742 edited = (text != cctx._text)
1742 edited = (text != cctx._text)
1743
1743
1744 # Save commit message in case this transaction gets rolled back
1744 # Save commit message in case this transaction gets rolled back
1745 # (e.g. by a pretxncommit hook). Leave the content alone on
1745 # (e.g. by a pretxncommit hook). Leave the content alone on
1746 # the assumption that the user will use the same editor again.
1746 # the assumption that the user will use the same editor again.
1747 msgfn = self.savecommitmessage(cctx._text)
1747 msgfn = self.savecommitmessage(cctx._text)
1748
1748
1749 # commit subs and write new state
1749 # commit subs and write new state
1750 if subs:
1750 if subs:
1751 for s in sorted(commitsubs):
1751 for s in sorted(commitsubs):
1752 sub = wctx.sub(s)
1752 sub = wctx.sub(s)
1753 self.ui.status(_('committing subrepository %s\n') %
1753 self.ui.status(_('committing subrepository %s\n') %
1754 subrepo.subrelpath(sub))
1754 subrepo.subrelpath(sub))
1755 sr = sub.commit(cctx._text, user, date)
1755 sr = sub.commit(cctx._text, user, date)
1756 newstate[s] = (newstate[s][0], sr)
1756 newstate[s] = (newstate[s][0], sr)
1757 subrepo.writestate(self, newstate)
1757 subrepo.writestate(self, newstate)
1758
1758
1759 p1, p2 = self.dirstate.parents()
1759 p1, p2 = self.dirstate.parents()
1760 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1760 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1761 try:
1761 try:
1762 self.hook("precommit", throw=True, parent1=hookp1,
1762 self.hook("precommit", throw=True, parent1=hookp1,
1763 parent2=hookp2)
1763 parent2=hookp2)
1764 tr = self.transaction('commit')
1764 tr = self.transaction('commit')
1765 ret = self.commitctx(cctx, True)
1765 ret = self.commitctx(cctx, True)
1766 except: # re-raises
1766 except: # re-raises
1767 if edited:
1767 if edited:
1768 self.ui.write(
1768 self.ui.write(
1769 _('note: commit message saved in %s\n') % msgfn)
1769 _('note: commit message saved in %s\n') % msgfn)
1770 raise
1770 raise
1771 # update bookmarks, dirstate and mergestate
1771 # update bookmarks, dirstate and mergestate
1772 bookmarks.update(self, [p1, p2], ret)
1772 bookmarks.update(self, [p1, p2], ret)
1773 cctx.markcommitted(ret)
1773 cctx.markcommitted(ret)
1774 ms.reset()
1774 ms.reset()
1775 tr.close()
1775 tr.close()
1776
1776
1777 finally:
1777 finally:
1778 lockmod.release(tr, lock, wlock)
1778 lockmod.release(tr, lock, wlock)
1779
1779
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs, so only fire it if the node still exists
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1788
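    # Illustrative sketch (not part of the original module): committing a
    # single file through the commit() entry point above. The repository
    # object, file name, and user string are assumptions for the example.
    #
    #     m = matchmod.match(repo.root, '', ['foo.txt'])
    #     node = repo.commit(text='fix foo', user='alice <a@example.com>',
    #                        match=m)
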
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)
1940
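    # Migration sketch for the deprecation above: callers are expected to go
    # through a changectx instead. Assuming an existing matcher 'match':
    #
    #     for f in repo[None].walk(match):      # working directory
    #         ...
    #     for f in repo['tip'].walk(match):     # a specific changeset
    #         ...
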
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which
        status fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
1969
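    # Hypothetical extension sketch: a callback following the contract
    # documented above, logging how many files each status run saw. The
    # function name and message are illustrative only.
    #
    #     def poststatus(wctx, status):
    #         wctx.repo().ui.debug('status saw %d modified files\n'
    #                              % len(status.modified))
    #     repo.addpostdsstatus(poststatus)
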
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
2008
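    # Illustrative calls (assuming a repository with a 'default' branch):
    #
    #     repo.branchheads()                        # heads of dirstate branch
    #     repo.branchheads('default', closed=True)  # include closed heads
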
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
2041
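    # Worked example for between(): along the first-parent chain it samples
    # nodes at exponentially growing distances 1, 2, 4, 8, ... from 'top'.
    # Assuming a linear history n0 <- n1 <- ... <- n9:
    #
    #     repo.between([(n9, n0)])   # returns [[n8, n7, n5, n1]]
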
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass
2048
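    # Hypothetical extension sketch: overriding checkpush via the usual
    # reposetup subclassing pattern. The policy enforced here is made up.
    #
    #     def reposetup(ui, repo):
    #         class checkedrepo(repo.__class__):
    #             def checkpush(self, pushop):
    #                 super(checkedrepo, self).checkpush(pushop)
    #                 if pushop.force:
    #                     raise error.Abort('forced pushes are disabled here')
    #         repo.__class__ = checkedrepo
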
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose callbacks are invoked with a
        pushop argument (carrying repo, remote and outgoing attributes)
        before changesets are pushed.
        """
        return util.hooks()
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
2079
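    # Illustrative sketch: creating a bookmark through the pushkey protocol.
    # 'node' is assumed to be a binary node id already known to the repo; an
    # empty 'old' value means the key is being created.
    #
    #     repo.pushkey('bookmarks', 'mybook', '', hex(node))
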
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
2113
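# Usage note (a sketch of how the transaction machinery is understood to use
# this): the closure returned by aftertrans is handed to the transaction as
# its after-close callback, so journal files get renamed to their undo names
# once the transaction finishes, roughly:
#
#     tr = transaction.transaction(..., after=aftertrans(renames))
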
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
2118
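# For example, undoname('.hg/journal.branch') returns '.hg/undo.branch'.
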
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
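
# Hypothetical extension sketch: wrapping newreporequirements to add a custom
# requirement to every newly created repository, as the docstring above
# invites. The extension and the 'exp-myfeature' string are made up.
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myfeature')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)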