##// END OF EJS Templates
localrepo: forcibly copy list of filecache keys...
Augie Fackler -
r31510:2244fb3e default
parent child Browse files
Show More
@@ -1,2087 +1,2087
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repoview,
53 repoview,
54 revset,
54 revset,
55 revsetlang,
55 revsetlang,
56 scmutil,
56 scmutil,
57 store,
57 store,
58 subrepo,
58 subrepo,
59 tags as tagsmod,
59 tags as tagsmod,
60 transaction,
60 transaction,
61 txnutil,
61 txnutil,
62 util,
62 util,
63 vfs as vfsmod,
63 vfs as vfsmod,
64 )
64 )
65
65
# module-level convenience aliases (re-exported for callers of this module)
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
69
69
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def join(self, obj, fname):
        # cached files are resolved directly under .hg/
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        if repo is None:
            # attribute accessed on the class itself, not on an instance
            return self
        # always operate on the unfiltered repo so every filtered view
        # shares a single cache entry
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
84
84
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve relative to .hg/store instead of .hg/
        return obj.sjoin(fname)
89
89
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache via propertycache
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: forward to the unfiltered repo's attribute so the
        # value is only ever computed/cached there
        return getattr(unfi, self.name)
98
98
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the object the property was accessed through (which may be
        # a filtered view), not on the unfiltered repo
        object.__setattr__(obj, self.name, value)
104
104
105
105
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # cached values live in the unfiltered repo's instance __dict__
    unfi = repo.unfiltered()
    return name in vars(unfi)
109
109
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered version
        # before delegating to the wrapped method
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
115
115
# capabilities every modern local peer advertises
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# legacy peers additionally support the old changegroupsubset command
legacycaps = moderncaps | {'changegroupsubset'}
119
119
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        # expose the 'served' filtered view, never the raw repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand out the underlying repository
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # re-raise the original exception after flushing the output
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
228
228
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # same as localpeer but advertises the legacy capability set
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
247
247
class localrepository(object):

    # repository format requirements this class knows how to handle
    supportedformats = {'revlogv1', 'generaldelta', 'treemanifest',
                        'manifestv2'}
    # full set of supported requirements (format features plus layout ones)
    _basesupported = supportedformats | {'store', 'fncache', 'shared',
                                         'relshared', 'dotencode'}
    # requirements that are forwarded to the store opener as options
    openerreqs = {'revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'}
    # name of the active repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
260
260
    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        # vfs to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs to access the content of the repository
        self.vfs = None
        # vfs to access the store part of the repository
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # read per-repo configuration; a missing .hg/hgrc is fine
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions owned by an enabled extension
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a repo without a requires file is a legacy layout
                if inst.errno != errno.ENOENT:
                    raise

        # honor .hg/sharedpath if present (share extension); a missing
        # sharedpath file simply means the repo is not shared
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
385
385
    @property
    def wopener(self):
        # deprecated alias for the working-directory vfs; warns on access
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
        return self.wvfs
390
390
    @property
    def opener(self):
        # deprecated alias for the .hg vfs; warns on access
        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
        return self.vfs
395
395
    def close(self):
        # flush lazily-written caches before the repo object goes away
        self._writecaches()
398
398
    def _loadextensions(self):
        # load every extension enabled in this repo's configuration
        extensions.loadall(self.ui)
401
401
402 def _writecaches(self):
402 def _writecaches(self):
403 if self._revbranchcache:
403 if self._revbranchcache:
404 self._revbranchcache.write()
404 self._revbranchcache.write()
405
405
    def _restrictcapabilities(self, caps):
        # advertise bundle2 support unless explicitly disabled via
        # experimental.bundle2-advertise
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)  # copy so the caller's set is not mutated
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
412
412
    def _applyopenerreqs(self):
        # forward relevant requirements and format configuration to the
        # store vfs options, which the revlog layer reads
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        # pick the compression engine requested by an exp-compression-*
        # requirement, if any
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]
437
437
    def _writerequirements(self):
        # persist self.requirements to the .hg/requires file
        scmutil.writerequires(self.vfs, self.requirements)
440
440
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is itself a registered subrepo: legal
                    return True
                else:
                    # path lives inside subrepo 'prefix'; let that subrepo
                    # validate the remainder of the path
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory level and retry
                parts.pop()
        return False
478
478
    def peer(self):
        # return a fresh localpeer wrapping this repo
        return localpeer(self) # not cached to avoid reference cycle
481
481
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the base class is always unfiltered, so return self
        return self
487
487
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
495
495
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store, re-read when either bookmark file changes
        return bookmarks.bmstore(self)
499
499
    @property
    def _activebookmark(self):
        # the active bookmark as tracked by the bookmark store
        return self._bookmarks.active
503
503
504 def bookmarkheads(self, bookmark):
504 def bookmarkheads(self, bookmark):
505 name = bookmark.split('@', 1)[0]
505 name = bookmark.split('@', 1)[0]
506 heads = []
506 heads = []
507 for mark, n in self._bookmarks.iteritems():
507 for mark, n in self._bookmarks.iteritems():
508 if mark.split('@', 1)[0] == name:
508 if mark.split('@', 1)[0] == name:
509 heads.append(n)
509 heads.append(n)
510 return heads
510 return heads
511
511
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # phase cache, re-read when phaseroots or the changelog changes
        return phases.phasecache(self, self._phasedefaults)
518
518
    @storecache('obsstore')
    def obsstore(self):
        """Return the obsolescence-marker store for this repository."""
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        # the store is read-only unless marker creation is enabled
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist on disk but the feature is off: warn, since the
            # markers will be ignored
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
536
536
    @storecache('00changelog.i')
    def changelog(self):
        # the changelog, including pending (not yet committed) revisions
        # when a transaction is in progress in another process
        c = changelog.changelog(self.svfs)
        if txnutil.mayhavepending(self.root):
            c.readpending('00changelog.i.a')
        return c
543
543
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)
549
549
    @storecache('00manifest.i')
    def manifestlog(self):
        # collection-level access to manifests, invalidated when the
        # manifest revlog changes on disk
        return manifest.manifestlog(self.svfs, self)
553
553
    @repofilecache('dirstate')
    def dirstate(self):
        # working-directory state; _dirstatevalidate guards against
        # parent nodes unknown to the changelog
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)
558
558
559 def _dirstatevalidate(self, node):
559 def _dirstatevalidate(self, node):
560 try:
560 try:
561 self.changelog.rev(node)
561 self.changelog.rev(node)
562 return node
562 return node
563 except error.LookupError:
563 except error.LookupError:
564 if not self._dirstatevalidatewarned:
564 if not self._dirstatevalidatewarned:
565 self._dirstatevalidatewarned = True
565 self._dirstatevalidatewarned = True
566 self.ui.warn(_("warning: ignoring unknown"
566 self.ui.warn(_("warning: ignoring unknown"
567 " working parent %s!\n") % short(node))
567 " working parent %s!\n") % short(node))
568 return nullid
568 return nullid
569
569
    def __getitem__(self, changeid):
        """Return the changectx (or list of them for a slice) for changeid.

        None or wdirrev yields the working-directory context; a slice
        yields the unfiltered revisions in that range.
        """
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # skip revisions hidden by the current repo filter
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
578
578
579 def __contains__(self, changeid):
579 def __contains__(self, changeid):
580 try:
580 try:
581 self[changeid]
581 self[changeid]
582 return True
582 return True
583 except error.RepoLookupError:
583 except error.RepoLookupError:
584 return False
584 return False
585
585
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no commits
        return True

    __bool__ = __nonzero__  # Python 3 name for __nonzero__
590
590
    def __len__(self):
        # number of revisions known to the changelog
        return len(self.changelog)
593
593
    def __iter__(self):
        # iterate over revision numbers, honoring the changelog's view
        return iter(self.changelog)
596
596
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        # interpolate the arguments, then evaluate without alias expansion
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
613
613
614 def set(self, expr, *args):
614 def set(self, expr, *args):
615 '''Find revisions matching a revset and emit changectx instances.
615 '''Find revisions matching a revset and emit changectx instances.
616
616
617 This is a convenience wrapper around ``revs()`` that iterates the
617 This is a convenience wrapper around ``revs()`` that iterates the
618 result and is a generator of changectx instances.
618 result and is a generator of changectx instances.
619
619
620 Revset aliases from the configuration are not expanded. To expand
620 Revset aliases from the configuration are not expanded. To expand
621 user aliases, consider calling ``scmutil.revrange()``.
621 user aliases, consider calling ``scmutil.revrange()``.
622 '''
622 '''
623 for r in self.revs(expr, *args):
623 for r in self.revs(expr, *args):
624 yield self[r]
624 yield self[r]
625
625
626 def anyrevs(self, specs, user=False):
626 def anyrevs(self, specs, user=False):
627 '''Find revisions matching one of the given revsets.
627 '''Find revisions matching one of the given revsets.
628
628
629 Revset aliases from the configuration are not expanded by default. To
629 Revset aliases from the configuration are not expanded by default. To
630 expand user aliases, specify ``user=True``.
630 expand user aliases, specify ``user=True``.
631 '''
631 '''
632 if user:
632 if user:
633 m = revset.matchany(self.ui, specs, repo=self)
633 m = revset.matchany(self.ui, specs, repo=self)
634 else:
634 else:
635 m = revset.matchany(None, specs)
635 m = revset.matchany(None, specs)
636 return m(self)
636 return m(self)
637
637
    def url(self):
        # canonical URL of a local repository is a file: path to its root
        return 'file:' + self.root
640
640
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
649
649
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        # Low-level tagging helper: record ``names`` for ``node`` either in
        # .hg/localtags (local=True) or by committing a .hgtags change.
        # ``names`` may be a single string or an iterable of strings.
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            # let pretag hooks veto the operation before anything is written
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag entries to fp, ensuring the previous content ends
            # with a newline; ``munge`` optionally re-encodes tag names
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # re-record the old node first so the tag's history is
                    # preserved in the file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wvfs('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wvfs('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        # make sure .hgtags is tracked before committing it
        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
723
723
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit on top of a dirty .hgtags to avoid losing
            # uncommitted edits (unknown/ignored count as dirty here)
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
753
753
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
776
776
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # the cached tags may point to filtered revisions, so recompute
            # instead of trusting _tagscache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
792
792
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a nullid node means the tag was deleted; drop it
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
825
825
826 def tagtype(self, tagname):
826 def tagtype(self, tagname):
827 '''
827 '''
828 return the type of the given tag. result can be:
828 return the type of the given tag. result can be:
829
829
830 'local' : a local tag
830 'local' : a local tag
831 'global' : a global tag
831 'global' : a global tag
832 None : tag does not exist
832 None : tag does not exist
833 '''
833 '''
834
834
835 return self._tagscache.tagtypes.get(tagname)
835 return self._tagscache.tagtypes.get(tagname)
836
836
837 def tagslist(self):
837 def tagslist(self):
838 '''return a list of tags ordered by revision'''
838 '''return a list of tags ordered by revision'''
839 if not self._tagscache.tagslist:
839 if not self._tagscache.tagslist:
840 l = []
840 l = []
841 for t, n in self.tags().iteritems():
841 for t, n in self.tags().iteritems():
842 l.append((self.changelog.rev(n), t, n))
842 l.append((self.changelog.rev(n), t, n))
843 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
843 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
844
844
845 return self._tagscache.tagslist
845 return self._tagscache.tagslist
846
846
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily build the reverse mapping node -> sorted list of tags
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
857
857
858 def nodebookmarks(self, node):
858 def nodebookmarks(self, node):
859 """return the list of bookmarks pointing to the specified node"""
859 """return the list of bookmarks pointing to the specified node"""
860 marks = []
860 marks = []
861 for bookmark, n in self._bookmarks.iteritems():
861 for bookmark, n in self._bookmarks.iteritems():
862 if n == node:
862 if n == node:
863 marks.append(bookmark)
863 marks.append(bookmark)
864 return sorted(marks)
864 return sorted(marks)
865
865
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache before returning it
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
871
871
    @unfilteredmethod
    def revbranchcache(self):
        # lazily create the rev -> branch cache on the unfiltered repo so
        # all repoviews share a single instance
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
877
877
878 def branchtip(self, branch, ignoremissing=False):
878 def branchtip(self, branch, ignoremissing=False):
879 '''return the tip node for a given branch
879 '''return the tip node for a given branch
880
880
881 If ignoremissing is True, then this method will not raise an error.
881 If ignoremissing is True, then this method will not raise an error.
882 This is helpful for callers that only expect None for a missing branch
882 This is helpful for callers that only expect None for a missing branch
883 (e.g. namespace).
883 (e.g. namespace).
884
884
885 '''
885 '''
886 try:
886 try:
887 return self.branchmap().branchtip(branch)
887 return self.branchmap().branchtip(branch)
888 except KeyError:
888 except KeyError:
889 if not ignoremissing:
889 if not ignoremissing:
890 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
890 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
891 else:
891 else:
892 pass
892 pass
893
893
894 def lookup(self, key):
894 def lookup(self, key):
895 return self[key].node()
895 return self[key].node()
896
896
897 def lookupbranch(self, key, remote=None):
897 def lookupbranch(self, key, remote=None):
898 repo = remote or self
898 repo = remote or self
899 if key in repo.branchmap():
899 if key in repo.branchmap():
900 return key
900 return key
901
901
902 repo = (remote and remote.local()) and remote or self
902 repo = (remote and remote.local()) and remote or self
903 return repo[key].branch()
903 return repo[key].branch()
904
904
905 def known(self, nodes):
905 def known(self, nodes):
906 cl = self.changelog
906 cl = self.changelog
907 nm = cl.nodemap
907 nm = cl.nodemap
908 filtered = cl.filteredrevs
908 filtered = cl.filteredrevs
909 result = []
909 result = []
910 for n in nodes:
910 for n in nodes:
911 r = nm.get(n)
911 r = nm.get(n)
912 resp = not (r is None or r in filtered)
912 resp = not (r is None or r in filtered)
913 result.append(resp)
913 result.append(resp)
914 return result
914 return result
915
915
    def local(self):
        # a local repository returns itself; remote peers return None/False
        return self
918
918
    def publishing(self):
        """Return True if this repository is a publishing repo."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
923
923
    def cancopy(self):
        """Return True if this repo can safely be copied by file copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
932
932
933 def shared(self):
933 def shared(self):
934 '''the type of shared repository (None if not shared)'''
934 '''the type of shared repository (None if not shared)'''
935 if self.sharedpath != self.path:
935 if self.sharedpath != self.path:
936 return 'store'
936 return 'store'
937 return None
937 return None
938
938
    def join(self, f, *insidef):
        # deprecated: delegates to repo.vfs.join (kept for compatibility)
        self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
        return self.vfs.join(os.path.join(f, *insidef))
942
942
    def wjoin(self, f, *insidef):
        # join path components under the working directory root
        return self.vfs.reljoin(self.root, f, *insidef)
945
945
    def file(self, f):
        """Return the filelog for tracked file ``f``."""
        # strip a leading '/' (as produced by some callers/protocols)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
950
950
    def changectx(self, changeid):
        # thin wrapper kept for API compatibility; same as self[changeid]
        return self[changeid]
953
953
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents, adjusting copy records as needed."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # drop copy records that no longer make sense with a single parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
970
970
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
975
975
    def getcwd(self):
        # current working directory, relative to the repo root
        return self.dirstate.getcwd()
978
978
    def pathto(self, f, cwd=None):
        # render repo-relative path f relative to cwd for display
        return self.dirstate.pathto(f, cwd)
981
981
    def wfile(self, f, mode='r'):
        # deprecated: open a working-directory file via repo.wvfs instead
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wfile'", '4.2')
        return self.wvfs(f, mode)
985
985
    def _link(self, f):
        # deprecated: test for a symlink via repo.wvfs.islink instead
        self.ui.deprecwarn("use 'repo.wvfs.islink' instead of 'repo._link'",
                           '4.0')
        return self.wvfs.islink(f)
990
990
    def _loadfilter(self, filter):
        """Load and cache the (pattern, function, params) list for a
        [encode]/[decode] filter configuration section."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered data filter whose name prefixes cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter: treat cmd as a shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
1014
1014
    def _filter(self, filterpats, filename, data):
        # apply the first filter whose pattern matches filename; at most
        # one filter is applied per file
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
1023
1023
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')
1027
1027
@unfilteredpropertycache
def _decodefilterpats(self):
    # Filters applied when writing file content to the working
    # directory (see wwrite); loaded lazily from the [decode] config.
    return self._loadfilter('decode')
1031
1031
def adddatafilter(self, name, filter):
    """Register ``filter`` under ``name`` so encode/decode rules whose
    command starts with ``name`` dispatch to it (see ``_loadfilter``)."""
    self._datafilters[name] = filter
1034
1034
def wread(self, filename):
    """Read ``filename`` from the working directory and return its
    content after applying the configured encode filters.

    Symlinks are read as their target path rather than followed.
    """
    if self.wvfs.islink(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
1041
1041
def wwrite(self, filename, data, flags, backgroundclose=False):
    """write ``data`` into ``filename`` in the working directory

    ``data`` is passed through the decode filters first.  ``flags``
    may contain 'l' (write a symlink) and/or 'x' (set the executable
    bit).  This returns length of written (maybe decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(filename, data, backgroundclose=backgroundclose)
    if 'x' in flags:
        self.wvfs.setflags(filename, False, True)
    return len(data)
1055
1055
def wwritedata(self, filename, data):
    """Return ``data`` as it would be written to the working directory,
    i.e. after applying the decode filters, without touching disk."""
    return self._filter(self._decodefilterpats, filename, data)
1058
1058
def currenttransaction(self):
    """return the current transaction or None if none exists"""
    if self._transref:
        # _transref is a weakref; it may resolve to None once the
        # transaction object has been garbage collected
        tr = self._transref()
    else:
        tr = None

    if tr and tr.running():
        return tr
    return None
1069
1069
def transaction(self, desc, report=None):
    """Open a transaction named ``desc`` and return it.

    If a transaction is already running, a nested transaction is
    returned instead.  ``report`` optionally overrides the default
    error reporter (``ui.warn``).  Requires the store lock to be held.
    """
    if (self.ui.configbool('devel', 'all-warnings')
        or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is None:
            raise error.ProgrammingError('transaction requires locking')
    tr = self.currenttransaction()
    if tr is not None:
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    idbase = "%.40f#%f" % (random.random(), time.time())
    ha = hashlib.sha1(idbase).hexdigest()
    if pycompat.ispy3:
        ha = ha.encode('latin1')
    txnid = 'TXN:' + ha
    self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    def validate(tr):
        """will run pre-closing hooks"""
        reporef().hook('pretxnclose', throw=True,
                       txnname=desc, **pycompat.strkwargs(tr.hookargs))
    def releasefn(tr, success):
        repo = reporef()
        if success:
            # this should be explicitly invoked here, because
            # in-memory changes aren't written out at closing
            # transaction, if tr.addfilegenerator (via
            # dirstate.write or so) isn't invoked while
            # transaction running
            repo.dirstate.write(None)
        else:
            # discard all changes (including ones already written
            # out) in this transaction
            repo.dirstate.restorebackup(None, prefix='journal.')

            repo.invalidate(clearfilecache=True)

    tr = transaction.transaction(rp, self.svfs, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate,
                                 releasefn=releasefn)

    tr.hookargs['txnid'] = txnid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        # Don't reference tr2 in hook() so we don't hold a reference.
        # This reduces memory consumption when there are multiple
        # transactions per lock. This can likely go away if issue5045
        # fixes the function accumulation.
        hookargs = tr2.hookargs

        def hook():
            reporef().hook('txnclose', throw=False, txnname=desc,
                           **pycompat.strkwargs(hookargs))
        reporef()._afterlock(hook)
    tr.addfinalize('txnclose-hook', txnclosehook)
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **tr2.hookargs)
    tr.addabort('txnabort-hook', txnaborthook)
    # avoid eager cache invalidation. in-memory data should be identical
    # to stored data if transaction has no error.
    tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
    self._transref = weakref.ref(tr)
    return tr
1159
1159
1160 def _journalfiles(self):
1160 def _journalfiles(self):
1161 return ((self.svfs, 'journal'),
1161 return ((self.svfs, 'journal'),
1162 (self.vfs, 'journal.dirstate'),
1162 (self.vfs, 'journal.dirstate'),
1163 (self.vfs, 'journal.branch'),
1163 (self.vfs, 'journal.branch'),
1164 (self.vfs, 'journal.desc'),
1164 (self.vfs, 'journal.desc'),
1165 (self.vfs, 'journal.bookmarks'),
1165 (self.vfs, 'journal.bookmarks'),
1166 (self.svfs, 'journal.phaseroots'))
1166 (self.svfs, 'journal.phaseroots'))
1167
1167
def undofiles(self):
    """Return (vfs, name) pairs for the undo files corresponding to the
    journal files (journal.* renamed to undo.* by ``undoname``)."""
    return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1170
1170
def _writejournal(self, desc):
    """Snapshot dirstate, branch, description, bookmarks and phaseroots
    into journal.* files so an aborted transaction can be rolled back."""
    self.dirstate.savebackup(None, prefix='journal.')
    self.vfs.write("journal.branch",
                   encoding.fromlocal(self.dirstate.branch()))
    self.vfs.write("journal.desc",
                   "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks",
                   self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots",
                    self.svfs.tryread("phaseroots"))
1181
1181
def recover(self):
    """Replay the journal of an interrupted transaction.

    Returns True if a journal existed and was rolled back, False
    otherwise.  Takes the store lock.
    """
    with self.lock():
        if self.svfs.exists("journal"):
            self.ui.status(_("rolling back interrupted transaction\n"))
            vfsmap = {'': self.svfs,
                      'plain': self.vfs,}
            transaction.rollback(self.svfs, vfsmap, "journal",
                                 self.ui.warn)
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
1195
1195
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction if undo information exists.

    Returns 0 on success (delegated to ``_rollback``) and 1 when there
    is nothing to roll back.  Acquires wlock then lock.
    """
    wlock = lock = dsguard = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if self.svfs.exists("undo"):
            dsguard = dirstateguard.dirstateguard(self, 'rollback')

            return self._rollback(dryrun, force, dsguard)
        else:
            self.ui.warn(_("no rollback information available\n"))
            return 1
    finally:
        release(dsguard, lock, wlock)
1210
1210
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
    """Perform the actual rollback of the last transaction.

    ``dsguard`` protects the dirstate; it is closed early when the
    working directory parents were removed by the rollback so the
    restored undo.dirstate is not overwritten.  Returns 0.
    """
    ui = self.ui
    try:
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %s'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        msg = _('rolling back unknown transaction\n')
        desc = None

    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise error.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
    self.invalidate()

    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        # prevent dirstateguard from overwriting already restored one
        dsguard.close()

        self.dirstate.restorebackup(None, prefix='undo.')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        parents = tuple([p.rev() for p in self[None].parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        mergemod.mergestate.clean(self, self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1281
1281
def invalidatecaches(self):
    """Drop in-memory derived caches: tags, branch heads and the
    filtering/obsolescence caches (via invalidatevolatilesets)."""

    if '_tagscache' in vars(self):
        # can't use delattr on proxy
        del self.__dict__['_tagscache']

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
1290
1290
def invalidatevolatilesets(self):
    """Drop caches derived from repository filtering and obsolescence
    markers; they are recomputed lazily on next access."""
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1294
1294
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    if hasunfilteredcache(self, 'dirstate'):
        # drop the dirstate's own filecache entries before dropping
        # the dirstate property itself
        for k in self.dirstate._filecache:
            try:
                delattr(self.dirstate, k)
            except AttributeError:
                pass
        delattr(self.unfiltered(), 'dirstate')
1311
1311
def invalidate(self, clearfilecache=False):
    '''Invalidates both store and non-store parts other than dirstate

    If a transaction is running, invalidation of store is omitted,
    because discarding in-memory changes might cause inconsistency
    (e.g. incomplete fncache causes unintentional failure, but
    redundant one doesn't).
    '''
    unfiltered = self.unfiltered() # all file caches are stored unfiltered
    # forcibly copy the keys: entries may be deleted from _filecache
    # while iterating, and dict views would break on that
    for k in list(self._filecache.keys()):
        # dirstate is invalidated separately in invalidatedirstate()
        if k == 'dirstate':
            continue

        if clearfilecache:
            del self._filecache[k]
        try:
            delattr(unfiltered, k)
        except AttributeError:
            pass
    self.invalidatecaches()
    if not self.currenttransaction():
        # TODO: Changing contents of store outside transaction
        # causes inconsistency. We should make in-memory store
        # changes detectable, and abort if changed.
        self.store.invalidatecaches()
1338
1338
def invalidateall(self):
    '''Fully invalidates both store and non-store parts, causing the
    subsequent operation to reread any outside changes.'''
    # extension should hook this to invalidate its caches
    self.invalidate()
    self.invalidatedirstate()
1345
1345
@unfilteredmethod
def _refreshfilecachestats(self, tr):
    """Reload stats of cached files so that they are flagged as valid"""
    for k, ce in self._filecache.items():
        # skip dirstate (handled separately) and entries whose cached
        # value has already been dropped from the instance
        if k == 'dirstate' or k not in self.__dict__:
            continue
        ce.refresh()
1353
1353
def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
          inheritchecker=None, parentenvvar=None):
    """Acquire ``lockname`` on ``vfs`` and return the lock object.

    First tries a non-blocking acquisition; if the lock is held and
    ``wait`` is true, warns about the holder and retries with the
    configured ui.timeout (default 600 seconds).
    """
    parentlock = None
    # the contents of parentenvvar are used by the underlying lock to
    # determine whether it can be inherited
    if parentenvvar is not None:
        parentlock = encoding.environ.get(parentenvvar)
    try:
        l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                         acquirefn=acquirefn, desc=desc,
                         inheritchecker=inheritchecker,
                         parentlock=parentlock)
    except error.LockHeld as inst:
        if not wait:
            raise
        # show more details for new-style locks
        if ':' in inst.locker:
            host, pid = inst.locker.split(":", 1)
            self.ui.warn(
                _("waiting for lock on %s held by process %r "
                  "on host %r\n") % (desc, pid, host))
        else:
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
        # default to 600 seconds timeout
        l = lockmod.lock(vfs, lockname,
                         int(self.ui.config("ui", "timeout", "600")),
                         releasefn=releasefn, acquirefn=acquirefn,
                         desc=desc)
        self.ui.warn(_("got lock after %s seconds\n") % l.delay)
    return l
1385
1385
1386 def _afterlock(self, callback):
1386 def _afterlock(self, callback):
1387 """add a callback to be run when the repository is fully unlocked
1387 """add a callback to be run when the repository is fully unlocked
1388
1388
1389 The callback will be executed when the outermost lock is released
1389 The callback will be executed when the outermost lock is released
1390 (with wlock being higher level than 'lock')."""
1390 (with wlock being higher level than 'lock')."""
1391 for ref in (self._wlockref, self._lockref):
1391 for ref in (self._wlockref, self._lockref):
1392 l = ref and ref()
1392 l = ref and ref()
1393 if l and l.held:
1393 if l and l.held:
1394 l.postrelease.append(callback)
1394 l.postrelease.append(callback)
1395 break
1395 break
1396 else: # no lock have been found.
1396 else: # no lock have been found.
1397 callback()
1397 callback()
1398
1398
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    l = self._currentlock(self._lockref)
    if l is not None:
        # re-enter the existing lock (bumps its reference count)
        l.lock()
        return l

    l = self._lock(self.svfs, "lock", wait, None,
                   self.invalidate, _('repository %s') % self.origroot)
    self._lockref = weakref.ref(l)
    return l
1415
1415
1416 def _wlockchecktransaction(self):
1416 def _wlockchecktransaction(self):
1417 if self.currenttransaction() is not None:
1417 if self.currenttransaction() is not None:
1418 raise error.LockInheritanceContractViolation(
1418 raise error.LockInheritanceContractViolation(
1419 'wlock cannot be inherited in the middle of a transaction')
1419 'wlock cannot be inherited in the middle of a transaction')
1420
1420
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.

    Use this before modifying files in .hg.

    If both 'lock' and 'wlock' must be acquired, ensure you always acquires
    'wlock' first to avoid a dead-lock hazard.'''
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        l.lock()
        return l

    # We do not need to check for non-waiting lock acquisition. Such
    # acquisition would not cause dead-lock as they would just fail.
    if wait and (self.ui.configbool('devel', 'all-warnings')
                 or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is not None:
            self.ui.develwarn('"wlock" acquired after "lock"')

    def unlock():
        # flush (or discard, on pending parent change) the dirstate on
        # release, then refresh its filecache stat so it stays valid
        if self.dirstate.pendingparentchange():
            self.dirstate.invalidate()
        else:
            self.dirstate.write(None)

        self._filecache['dirstate'].refresh()

    l = self._lock(self.vfs, "wlock", wait, unlock,
                   self.invalidatedirstate, _('working directory of %s') %
                   self.origroot,
                   inheritchecker=self._wlockchecktransaction,
                   parentenvvar='HG_WLOCK_LOCKER')
    self._wlockref = weakref.ref(l)
    return l
1456
1456
1457 def _currentlock(self, lockref):
1457 def _currentlock(self, lockref):
1458 """Returns the lock if it's held, or None if it's not."""
1458 """Returns the lock if it's held, or None if it's not."""
1459 if lockref is None:
1459 if lockref is None:
1460 return None
1460 return None
1461 l = lockref()
1461 l = lockref()
1462 if l is None or not l.held:
1462 if l is None or not l.held:
1463 return None
1463 return None
1464 return l
1464 return l
1465
1465
def currentwlock(self):
    """Returns the wlock if it's held, or None if it's not."""
    return self._currentlock(self._wlockref)
1469
1469
1470 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1470 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1471 """
1471 """
1472 commit an individual file as part of a larger transaction
1472 commit an individual file as part of a larger transaction
1473 """
1473 """
1474
1474
1475 fname = fctx.path()
1475 fname = fctx.path()
1476 fparent1 = manifest1.get(fname, nullid)
1476 fparent1 = manifest1.get(fname, nullid)
1477 fparent2 = manifest2.get(fname, nullid)
1477 fparent2 = manifest2.get(fname, nullid)
1478 if isinstance(fctx, context.filectx):
1478 if isinstance(fctx, context.filectx):
1479 node = fctx.filenode()
1479 node = fctx.filenode()
1480 if node in [fparent1, fparent2]:
1480 if node in [fparent1, fparent2]:
1481 self.ui.debug('reusing %s filelog entry\n' % fname)
1481 self.ui.debug('reusing %s filelog entry\n' % fname)
1482 if manifest1.flags(fname) != fctx.flags():
1482 if manifest1.flags(fname) != fctx.flags():
1483 changelist.append(fname)
1483 changelist.append(fname)
1484 return node
1484 return node
1485
1485
1486 flog = self.file(fname)
1486 flog = self.file(fname)
1487 meta = {}
1487 meta = {}
1488 copy = fctx.renamed()
1488 copy = fctx.renamed()
1489 if copy and copy[0] != fname:
1489 if copy and copy[0] != fname:
1490 # Mark the new revision of this file as a copy of another
1490 # Mark the new revision of this file as a copy of another
1491 # file. This copy data will effectively act as a parent
1491 # file. This copy data will effectively act as a parent
1492 # of this new revision. If this is a merge, the first
1492 # of this new revision. If this is a merge, the first
1493 # parent will be the nullid (meaning "look up the copy data")
1493 # parent will be the nullid (meaning "look up the copy data")
1494 # and the second one will be the other parent. For example:
1494 # and the second one will be the other parent. For example:
1495 #
1495 #
1496 # 0 --- 1 --- 3 rev1 changes file foo
1496 # 0 --- 1 --- 3 rev1 changes file foo
1497 # \ / rev2 renames foo to bar and changes it
1497 # \ / rev2 renames foo to bar and changes it
1498 # \- 2 -/ rev3 should have bar with all changes and
1498 # \- 2 -/ rev3 should have bar with all changes and
1499 # should record that bar descends from
1499 # should record that bar descends from
1500 # bar in rev2 and foo in rev1
1500 # bar in rev2 and foo in rev1
1501 #
1501 #
1502 # this allows this merge to succeed:
1502 # this allows this merge to succeed:
1503 #
1503 #
1504 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1504 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1505 # \ / merging rev3 and rev4 should use bar@rev2
1505 # \ / merging rev3 and rev4 should use bar@rev2
1506 # \- 2 --- 4 as the merge base
1506 # \- 2 --- 4 as the merge base
1507 #
1507 #
1508
1508
1509 cfname = copy[0]
1509 cfname = copy[0]
1510 crev = manifest1.get(cfname)
1510 crev = manifest1.get(cfname)
1511 newfparent = fparent2
1511 newfparent = fparent2
1512
1512
1513 if manifest2: # branch merge
1513 if manifest2: # branch merge
1514 if fparent2 == nullid or crev is None: # copied on remote side
1514 if fparent2 == nullid or crev is None: # copied on remote side
1515 if cfname in manifest2:
1515 if cfname in manifest2:
1516 crev = manifest2[cfname]
1516 crev = manifest2[cfname]
1517 newfparent = fparent1
1517 newfparent = fparent1
1518
1518
1519 # Here, we used to search backwards through history to try to find
1519 # Here, we used to search backwards through history to try to find
1520 # where the file copy came from if the source of a copy was not in
1520 # where the file copy came from if the source of a copy was not in
1521 # the parent directory. However, this doesn't actually make sense to
1521 # the parent directory. However, this doesn't actually make sense to
1522 # do (what does a copy from something not in your working copy even
1522 # do (what does a copy from something not in your working copy even
1523 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1523 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1524 # the user that copy information was dropped, so if they didn't
1524 # the user that copy information was dropped, so if they didn't
1525 # expect this outcome it can be fixed, but this is the correct
1525 # expect this outcome it can be fixed, but this is the correct
1526 # behavior in this circumstance.
1526 # behavior in this circumstance.
1527
1527
1528 if crev:
1528 if crev:
1529 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1529 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1530 meta["copy"] = cfname
1530 meta["copy"] = cfname
1531 meta["copyrev"] = hex(crev)
1531 meta["copyrev"] = hex(crev)
1532 fparent1, fparent2 = nullid, newfparent
1532 fparent1, fparent2 = nullid, newfparent
1533 else:
1533 else:
1534 self.ui.warn(_("warning: can't find ancestor for '%s' "
1534 self.ui.warn(_("warning: can't find ancestor for '%s' "
1535 "copied from '%s'!\n") % (fname, cfname))
1535 "copied from '%s'!\n") % (fname, cfname))
1536
1536
1537 elif fparent1 == nullid:
1537 elif fparent1 == nullid:
1538 fparent1, fparent2 = fparent2, nullid
1538 fparent1, fparent2 = fparent2, nullid
1539 elif fparent2 != nullid:
1539 elif fparent2 != nullid:
1540 # is one parent an ancestor of the other?
1540 # is one parent an ancestor of the other?
1541 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1541 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1542 if fparent1 in fparentancestors:
1542 if fparent1 in fparentancestors:
1543 fparent1, fparent2 = fparent2, nullid
1543 fparent1, fparent2 = fparent2, nullid
1544 elif fparent2 in fparentancestors:
1544 elif fparent2 in fparentancestors:
1545 fparent2 = nullid
1545 fparent2 = nullid
1546
1546
1547 # is the file changed?
1547 # is the file changed?
1548 text = fctx.data()
1548 text = fctx.data()
1549 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1549 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1550 changelist.append(fname)
1550 changelist.append(fname)
1551 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1551 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1552 # are just the flags changed during merge?
1552 # are just the flags changed during merge?
1553 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1553 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1554 changelist.append(fname)
1554 changelist.append(fname)
1555
1555
1556 return fparent1
1556 return fparent1
1557
1557
1558 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1558 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1559 """check for commit arguments that aren't committable"""
1559 """check for commit arguments that aren't committable"""
1560 if match.isexact() or match.prefix():
1560 if match.isexact() or match.prefix():
1561 matched = set(status.modified + status.added + status.removed)
1561 matched = set(status.modified + status.added + status.removed)
1562
1562
1563 for f in match.files():
1563 for f in match.files():
1564 f = self.dirstate.normalize(f)
1564 f = self.dirstate.normalize(f)
1565 if f == '.' or f in matched or f in wctx.substate:
1565 if f == '.' or f in matched or f in wctx.substate:
1566 continue
1566 continue
1567 if f in status.deleted:
1567 if f in status.deleted:
1568 fail(f, _('file not found!'))
1568 fail(f, _('file not found!'))
1569 if f in vdirs: # visited directory
1569 if f in vdirs: # visited directory
1570 d = f + '/'
1570 d = f + '/'
1571 for mf in matched:
1571 for mf in matched:
1572 if mf.startswith(d):
1572 if mf.startswith(d):
1573 break
1573 break
1574 else:
1574 else:
1575 fail(f, _("no match under directory!"))
1575 fail(f, _("no match under directory!"))
1576 elif f not in self.dirstate:
1576 elif f not in self.dirstate:
1577 fail(f, _("file not tracked!"))
1577 fail(f, _("file not tracked!"))
1578
1578
1579 @unfilteredmethod
1579 @unfilteredmethod
1580 def commit(self, text="", user=None, date=None, match=None, force=False,
1580 def commit(self, text="", user=None, date=None, match=None, force=False,
1581 editor=False, extra=None):
1581 editor=False, extra=None):
1582 """Add a new revision to current repository.
1582 """Add a new revision to current repository.
1583
1583
1584 Revision information is gathered from the working directory,
1584 Revision information is gathered from the working directory,
1585 match can be used to filter the committed files. If editor is
1585 match can be used to filter the committed files. If editor is
1586 supplied, it is called to get a commit message.
1586 supplied, it is called to get a commit message.
1587 """
1587 """
1588 if extra is None:
1588 if extra is None:
1589 extra = {}
1589 extra = {}
1590
1590
1591 def fail(f, msg):
1591 def fail(f, msg):
1592 raise error.Abort('%s: %s' % (f, msg))
1592 raise error.Abort('%s: %s' % (f, msg))
1593
1593
1594 if not match:
1594 if not match:
1595 match = matchmod.always(self.root, '')
1595 match = matchmod.always(self.root, '')
1596
1596
1597 if not force:
1597 if not force:
1598 vdirs = []
1598 vdirs = []
1599 match.explicitdir = vdirs.append
1599 match.explicitdir = vdirs.append
1600 match.bad = fail
1600 match.bad = fail
1601
1601
1602 wlock = lock = tr = None
1602 wlock = lock = tr = None
1603 try:
1603 try:
1604 wlock = self.wlock()
1604 wlock = self.wlock()
1605 lock = self.lock() # for recent changelog (see issue4368)
1605 lock = self.lock() # for recent changelog (see issue4368)
1606
1606
1607 wctx = self[None]
1607 wctx = self[None]
1608 merge = len(wctx.parents()) > 1
1608 merge = len(wctx.parents()) > 1
1609
1609
1610 if not force and merge and match.ispartial():
1610 if not force and merge and match.ispartial():
1611 raise error.Abort(_('cannot partially commit a merge '
1611 raise error.Abort(_('cannot partially commit a merge '
1612 '(do not specify files or patterns)'))
1612 '(do not specify files or patterns)'))
1613
1613
1614 status = self.status(match=match, clean=force)
1614 status = self.status(match=match, clean=force)
1615 if force:
1615 if force:
1616 status.modified.extend(status.clean) # mq may commit clean files
1616 status.modified.extend(status.clean) # mq may commit clean files
1617
1617
1618 # check subrepos
1618 # check subrepos
1619 subs = []
1619 subs = []
1620 commitsubs = set()
1620 commitsubs = set()
1621 newstate = wctx.substate.copy()
1621 newstate = wctx.substate.copy()
1622 # only manage subrepos and .hgsubstate if .hgsub is present
1622 # only manage subrepos and .hgsubstate if .hgsub is present
1623 if '.hgsub' in wctx:
1623 if '.hgsub' in wctx:
1624 # we'll decide whether to track this ourselves, thanks
1624 # we'll decide whether to track this ourselves, thanks
1625 for c in status.modified, status.added, status.removed:
1625 for c in status.modified, status.added, status.removed:
1626 if '.hgsubstate' in c:
1626 if '.hgsubstate' in c:
1627 c.remove('.hgsubstate')
1627 c.remove('.hgsubstate')
1628
1628
1629 # compare current state to last committed state
1629 # compare current state to last committed state
1630 # build new substate based on last committed state
1630 # build new substate based on last committed state
1631 oldstate = wctx.p1().substate
1631 oldstate = wctx.p1().substate
1632 for s in sorted(newstate.keys()):
1632 for s in sorted(newstate.keys()):
1633 if not match(s):
1633 if not match(s):
1634 # ignore working copy, use old state if present
1634 # ignore working copy, use old state if present
1635 if s in oldstate:
1635 if s in oldstate:
1636 newstate[s] = oldstate[s]
1636 newstate[s] = oldstate[s]
1637 continue
1637 continue
1638 if not force:
1638 if not force:
1639 raise error.Abort(
1639 raise error.Abort(
1640 _("commit with new subrepo %s excluded") % s)
1640 _("commit with new subrepo %s excluded") % s)
1641 dirtyreason = wctx.sub(s).dirtyreason(True)
1641 dirtyreason = wctx.sub(s).dirtyreason(True)
1642 if dirtyreason:
1642 if dirtyreason:
1643 if not self.ui.configbool('ui', 'commitsubrepos'):
1643 if not self.ui.configbool('ui', 'commitsubrepos'):
1644 raise error.Abort(dirtyreason,
1644 raise error.Abort(dirtyreason,
1645 hint=_("use --subrepos for recursive commit"))
1645 hint=_("use --subrepos for recursive commit"))
1646 subs.append(s)
1646 subs.append(s)
1647 commitsubs.add(s)
1647 commitsubs.add(s)
1648 else:
1648 else:
1649 bs = wctx.sub(s).basestate()
1649 bs = wctx.sub(s).basestate()
1650 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1650 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1651 if oldstate.get(s, (None, None, None))[1] != bs:
1651 if oldstate.get(s, (None, None, None))[1] != bs:
1652 subs.append(s)
1652 subs.append(s)
1653
1653
1654 # check for removed subrepos
1654 # check for removed subrepos
1655 for p in wctx.parents():
1655 for p in wctx.parents():
1656 r = [s for s in p.substate if s not in newstate]
1656 r = [s for s in p.substate if s not in newstate]
1657 subs += [s for s in r if match(s)]
1657 subs += [s for s in r if match(s)]
1658 if subs:
1658 if subs:
1659 if (not match('.hgsub') and
1659 if (not match('.hgsub') and
1660 '.hgsub' in (wctx.modified() + wctx.added())):
1660 '.hgsub' in (wctx.modified() + wctx.added())):
1661 raise error.Abort(
1661 raise error.Abort(
1662 _("can't commit subrepos without .hgsub"))
1662 _("can't commit subrepos without .hgsub"))
1663 status.modified.insert(0, '.hgsubstate')
1663 status.modified.insert(0, '.hgsubstate')
1664
1664
1665 elif '.hgsub' in status.removed:
1665 elif '.hgsub' in status.removed:
1666 # clean up .hgsubstate when .hgsub is removed
1666 # clean up .hgsubstate when .hgsub is removed
1667 if ('.hgsubstate' in wctx and
1667 if ('.hgsubstate' in wctx and
1668 '.hgsubstate' not in (status.modified + status.added +
1668 '.hgsubstate' not in (status.modified + status.added +
1669 status.removed)):
1669 status.removed)):
1670 status.removed.insert(0, '.hgsubstate')
1670 status.removed.insert(0, '.hgsubstate')
1671
1671
1672 # make sure all explicit patterns are matched
1672 # make sure all explicit patterns are matched
1673 if not force:
1673 if not force:
1674 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1674 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1675
1675
1676 cctx = context.workingcommitctx(self, status,
1676 cctx = context.workingcommitctx(self, status,
1677 text, user, date, extra)
1677 text, user, date, extra)
1678
1678
1679 # internal config: ui.allowemptycommit
1679 # internal config: ui.allowemptycommit
1680 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1680 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1681 or extra.get('close') or merge or cctx.files()
1681 or extra.get('close') or merge or cctx.files()
1682 or self.ui.configbool('ui', 'allowemptycommit'))
1682 or self.ui.configbool('ui', 'allowemptycommit'))
1683 if not allowemptycommit:
1683 if not allowemptycommit:
1684 return None
1684 return None
1685
1685
1686 if merge and cctx.deleted():
1686 if merge and cctx.deleted():
1687 raise error.Abort(_("cannot commit merge with missing files"))
1687 raise error.Abort(_("cannot commit merge with missing files"))
1688
1688
1689 ms = mergemod.mergestate.read(self)
1689 ms = mergemod.mergestate.read(self)
1690 mergeutil.checkunresolved(ms)
1690 mergeutil.checkunresolved(ms)
1691
1691
1692 if editor:
1692 if editor:
1693 cctx._text = editor(self, cctx, subs)
1693 cctx._text = editor(self, cctx, subs)
1694 edited = (text != cctx._text)
1694 edited = (text != cctx._text)
1695
1695
1696 # Save commit message in case this transaction gets rolled back
1696 # Save commit message in case this transaction gets rolled back
1697 # (e.g. by a pretxncommit hook). Leave the content alone on
1697 # (e.g. by a pretxncommit hook). Leave the content alone on
1698 # the assumption that the user will use the same editor again.
1698 # the assumption that the user will use the same editor again.
1699 msgfn = self.savecommitmessage(cctx._text)
1699 msgfn = self.savecommitmessage(cctx._text)
1700
1700
1701 # commit subs and write new state
1701 # commit subs and write new state
1702 if subs:
1702 if subs:
1703 for s in sorted(commitsubs):
1703 for s in sorted(commitsubs):
1704 sub = wctx.sub(s)
1704 sub = wctx.sub(s)
1705 self.ui.status(_('committing subrepository %s\n') %
1705 self.ui.status(_('committing subrepository %s\n') %
1706 subrepo.subrelpath(sub))
1706 subrepo.subrelpath(sub))
1707 sr = sub.commit(cctx._text, user, date)
1707 sr = sub.commit(cctx._text, user, date)
1708 newstate[s] = (newstate[s][0], sr)
1708 newstate[s] = (newstate[s][0], sr)
1709 subrepo.writestate(self, newstate)
1709 subrepo.writestate(self, newstate)
1710
1710
1711 p1, p2 = self.dirstate.parents()
1711 p1, p2 = self.dirstate.parents()
1712 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1712 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1713 try:
1713 try:
1714 self.hook("precommit", throw=True, parent1=hookp1,
1714 self.hook("precommit", throw=True, parent1=hookp1,
1715 parent2=hookp2)
1715 parent2=hookp2)
1716 tr = self.transaction('commit')
1716 tr = self.transaction('commit')
1717 ret = self.commitctx(cctx, True)
1717 ret = self.commitctx(cctx, True)
1718 except: # re-raises
1718 except: # re-raises
1719 if edited:
1719 if edited:
1720 self.ui.write(
1720 self.ui.write(
1721 _('note: commit message saved in %s\n') % msgfn)
1721 _('note: commit message saved in %s\n') % msgfn)
1722 raise
1722 raise
1723 # update bookmarks, dirstate and mergestate
1723 # update bookmarks, dirstate and mergestate
1724 bookmarks.update(self, [p1, p2], ret)
1724 bookmarks.update(self, [p1, p2], ret)
1725 cctx.markcommitted(ret)
1725 cctx.markcommitted(ret)
1726 ms.reset()
1726 ms.reset()
1727 tr.close()
1727 tr.close()
1728
1728
1729 finally:
1729 finally:
1730 lockmod.release(tr, lock, wlock)
1730 lockmod.release(tr, lock, wlock)
1731
1731
1732 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1732 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1733 # hack for command that use a temporary commit (eg: histedit)
1733 # hack for command that use a temporary commit (eg: histedit)
1734 # temporary commit got stripped before hook release
1734 # temporary commit got stripped before hook release
1735 if self.changelog.hasnode(ret):
1735 if self.changelog.hasnode(ret):
1736 self.hook("commit", node=node, parent1=parent1,
1736 self.hook("commit", node=node, parent1=parent1,
1737 parent2=parent2)
1737 parent2=parent2)
1738 self._afterlock(commithook)
1738 self._afterlock(commithook)
1739 return ret
1739 return ret
1740
1740
1741 @unfilteredmethod
1741 @unfilteredmethod
1742 def commitctx(self, ctx, error=False):
1742 def commitctx(self, ctx, error=False):
1743 """Add a new revision to current repository.
1743 """Add a new revision to current repository.
1744 Revision information is passed via the context argument.
1744 Revision information is passed via the context argument.
1745 """
1745 """
1746
1746
1747 tr = None
1747 tr = None
1748 p1, p2 = ctx.p1(), ctx.p2()
1748 p1, p2 = ctx.p1(), ctx.p2()
1749 user = ctx.user()
1749 user = ctx.user()
1750
1750
1751 lock = self.lock()
1751 lock = self.lock()
1752 try:
1752 try:
1753 tr = self.transaction("commit")
1753 tr = self.transaction("commit")
1754 trp = weakref.proxy(tr)
1754 trp = weakref.proxy(tr)
1755
1755
1756 if ctx.manifestnode():
1756 if ctx.manifestnode():
1757 # reuse an existing manifest revision
1757 # reuse an existing manifest revision
1758 mn = ctx.manifestnode()
1758 mn = ctx.manifestnode()
1759 files = ctx.files()
1759 files = ctx.files()
1760 elif ctx.files():
1760 elif ctx.files():
1761 m1ctx = p1.manifestctx()
1761 m1ctx = p1.manifestctx()
1762 m2ctx = p2.manifestctx()
1762 m2ctx = p2.manifestctx()
1763 mctx = m1ctx.copy()
1763 mctx = m1ctx.copy()
1764
1764
1765 m = mctx.read()
1765 m = mctx.read()
1766 m1 = m1ctx.read()
1766 m1 = m1ctx.read()
1767 m2 = m2ctx.read()
1767 m2 = m2ctx.read()
1768
1768
1769 # check in files
1769 # check in files
1770 added = []
1770 added = []
1771 changed = []
1771 changed = []
1772 removed = list(ctx.removed())
1772 removed = list(ctx.removed())
1773 linkrev = len(self)
1773 linkrev = len(self)
1774 self.ui.note(_("committing files:\n"))
1774 self.ui.note(_("committing files:\n"))
1775 for f in sorted(ctx.modified() + ctx.added()):
1775 for f in sorted(ctx.modified() + ctx.added()):
1776 self.ui.note(f + "\n")
1776 self.ui.note(f + "\n")
1777 try:
1777 try:
1778 fctx = ctx[f]
1778 fctx = ctx[f]
1779 if fctx is None:
1779 if fctx is None:
1780 removed.append(f)
1780 removed.append(f)
1781 else:
1781 else:
1782 added.append(f)
1782 added.append(f)
1783 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1783 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1784 trp, changed)
1784 trp, changed)
1785 m.setflag(f, fctx.flags())
1785 m.setflag(f, fctx.flags())
1786 except OSError as inst:
1786 except OSError as inst:
1787 self.ui.warn(_("trouble committing %s!\n") % f)
1787 self.ui.warn(_("trouble committing %s!\n") % f)
1788 raise
1788 raise
1789 except IOError as inst:
1789 except IOError as inst:
1790 errcode = getattr(inst, 'errno', errno.ENOENT)
1790 errcode = getattr(inst, 'errno', errno.ENOENT)
1791 if error or errcode and errcode != errno.ENOENT:
1791 if error or errcode and errcode != errno.ENOENT:
1792 self.ui.warn(_("trouble committing %s!\n") % f)
1792 self.ui.warn(_("trouble committing %s!\n") % f)
1793 raise
1793 raise
1794
1794
1795 # update manifest
1795 # update manifest
1796 self.ui.note(_("committing manifest\n"))
1796 self.ui.note(_("committing manifest\n"))
1797 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1797 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1798 drop = [f for f in removed if f in m]
1798 drop = [f for f in removed if f in m]
1799 for f in drop:
1799 for f in drop:
1800 del m[f]
1800 del m[f]
1801 mn = mctx.write(trp, linkrev,
1801 mn = mctx.write(trp, linkrev,
1802 p1.manifestnode(), p2.manifestnode(),
1802 p1.manifestnode(), p2.manifestnode(),
1803 added, drop)
1803 added, drop)
1804 files = changed + removed
1804 files = changed + removed
1805 else:
1805 else:
1806 mn = p1.manifestnode()
1806 mn = p1.manifestnode()
1807 files = []
1807 files = []
1808
1808
1809 # update changelog
1809 # update changelog
1810 self.ui.note(_("committing changelog\n"))
1810 self.ui.note(_("committing changelog\n"))
1811 self.changelog.delayupdate(tr)
1811 self.changelog.delayupdate(tr)
1812 n = self.changelog.add(mn, files, ctx.description(),
1812 n = self.changelog.add(mn, files, ctx.description(),
1813 trp, p1.node(), p2.node(),
1813 trp, p1.node(), p2.node(),
1814 user, ctx.date(), ctx.extra().copy())
1814 user, ctx.date(), ctx.extra().copy())
1815 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1815 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1816 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1816 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1817 parent2=xp2)
1817 parent2=xp2)
1818 # set the new commit is proper phase
1818 # set the new commit is proper phase
1819 targetphase = subrepo.newcommitphase(self.ui, ctx)
1819 targetphase = subrepo.newcommitphase(self.ui, ctx)
1820 if targetphase:
1820 if targetphase:
1821 # retract boundary do not alter parent changeset.
1821 # retract boundary do not alter parent changeset.
1822 # if a parent have higher the resulting phase will
1822 # if a parent have higher the resulting phase will
1823 # be compliant anyway
1823 # be compliant anyway
1824 #
1824 #
1825 # if minimal phase was 0 we don't need to retract anything
1825 # if minimal phase was 0 we don't need to retract anything
1826 phases.retractboundary(self, tr, targetphase, [n])
1826 phases.retractboundary(self, tr, targetphase, [n])
1827 tr.close()
1827 tr.close()
1828 branchmap.updatecache(self.filtered('served'))
1828 branchmap.updatecache(self.filtered('served'))
1829 return n
1829 return n
1830 finally:
1830 finally:
1831 if tr:
1831 if tr:
1832 tr.release()
1832 tr.release()
1833 lock.release()
1833 lock.release()
1834
1834
1835 @unfilteredmethod
1835 @unfilteredmethod
1836 def destroying(self):
1836 def destroying(self):
1837 '''Inform the repository that nodes are about to be destroyed.
1837 '''Inform the repository that nodes are about to be destroyed.
1838 Intended for use by strip and rollback, so there's a common
1838 Intended for use by strip and rollback, so there's a common
1839 place for anything that has to be done before destroying history.
1839 place for anything that has to be done before destroying history.
1840
1840
1841 This is mostly useful for saving state that is in memory and waiting
1841 This is mostly useful for saving state that is in memory and waiting
1842 to be flushed when the current lock is released. Because a call to
1842 to be flushed when the current lock is released. Because a call to
1843 destroyed is imminent, the repo will be invalidated causing those
1843 destroyed is imminent, the repo will be invalidated causing those
1844 changes to stay in memory (waiting for the next unlock), or vanish
1844 changes to stay in memory (waiting for the next unlock), or vanish
1845 completely.
1845 completely.
1846 '''
1846 '''
1847 # When using the same lock to commit and strip, the phasecache is left
1847 # When using the same lock to commit and strip, the phasecache is left
1848 # dirty after committing. Then when we strip, the repo is invalidated,
1848 # dirty after committing. Then when we strip, the repo is invalidated,
1849 # causing those changes to disappear.
1849 # causing those changes to disappear.
1850 if '_phasecache' in vars(self):
1850 if '_phasecache' in vars(self):
1851 self._phasecache.write()
1851 self._phasecache.write()
1852
1852
1853 @unfilteredmethod
1853 @unfilteredmethod
1854 def destroyed(self):
1854 def destroyed(self):
1855 '''Inform the repository that nodes have been destroyed.
1855 '''Inform the repository that nodes have been destroyed.
1856 Intended for use by strip and rollback, so there's a common
1856 Intended for use by strip and rollback, so there's a common
1857 place for anything that has to be done after destroying history.
1857 place for anything that has to be done after destroying history.
1858 '''
1858 '''
1859 # When one tries to:
1859 # When one tries to:
1860 # 1) destroy nodes thus calling this method (e.g. strip)
1860 # 1) destroy nodes thus calling this method (e.g. strip)
1861 # 2) use phasecache somewhere (e.g. commit)
1861 # 2) use phasecache somewhere (e.g. commit)
1862 #
1862 #
1863 # then 2) will fail because the phasecache contains nodes that were
1863 # then 2) will fail because the phasecache contains nodes that were
1864 # removed. We can either remove phasecache from the filecache,
1864 # removed. We can either remove phasecache from the filecache,
1865 # causing it to reload next time it is accessed, or simply filter
1865 # causing it to reload next time it is accessed, or simply filter
1866 # the removed nodes now and write the updated cache.
1866 # the removed nodes now and write the updated cache.
1867 self._phasecache.filterunknown(self)
1867 self._phasecache.filterunknown(self)
1868 self._phasecache.write()
1868 self._phasecache.write()
1869
1869
1870 # update the 'served' branch cache to help read only server process
1870 # update the 'served' branch cache to help read only server process
1871 # Thanks to branchcache collaboration this is done from the nearest
1871 # Thanks to branchcache collaboration this is done from the nearest
1872 # filtered subset and it is expected to be fast.
1872 # filtered subset and it is expected to be fast.
1873 branchmap.updatecache(self.filtered('served'))
1873 branchmap.updatecache(self.filtered('served'))
1874
1874
1875 # Ensure the persistent tag cache is updated. Doing it now
1875 # Ensure the persistent tag cache is updated. Doing it now
1876 # means that the tag cache only has to worry about destroyed
1876 # means that the tag cache only has to worry about destroyed
1877 # heads immediately after a strip/rollback. That in turn
1877 # heads immediately after a strip/rollback. That in turn
1878 # guarantees that "cachetip == currenttip" (comparing both rev
1878 # guarantees that "cachetip == currenttip" (comparing both rev
1879 # and node) always means no nodes have been added or destroyed.
1879 # and node) always means no nodes have been added or destroyed.
1880
1880
1881 # XXX this is suboptimal when qrefresh'ing: we strip the current
1881 # XXX this is suboptimal when qrefresh'ing: we strip the current
1882 # head, refresh the tag cache, then immediately add a new head.
1882 # head, refresh the tag cache, then immediately add a new head.
1883 # But I think doing it this way is necessary for the "instant
1883 # But I think doing it this way is necessary for the "instant
1884 # tag cache retrieval" case to work.
1884 # tag cache retrieval" case to work.
1885 self.invalidate()
1885 self.invalidate()
1886
1886
1887 def walk(self, match, node=None):
1887 def walk(self, match, node=None):
1888 '''
1888 '''
1889 walk recursively through the directory tree or a given
1889 walk recursively through the directory tree or a given
1890 changeset, finding all files matched by the match
1890 changeset, finding all files matched by the match
1891 function
1891 function
1892 '''
1892 '''
1893 return self[node].walk(match)
1893 return self[node].walk(match)
1894
1894
1895 def status(self, node1='.', node2=None, match=None,
1895 def status(self, node1='.', node2=None, match=None,
1896 ignored=False, clean=False, unknown=False,
1896 ignored=False, clean=False, unknown=False,
1897 listsubrepos=False):
1897 listsubrepos=False):
1898 '''a convenience method that calls node1.status(node2)'''
1898 '''a convenience method that calls node1.status(node2)'''
1899 return self[node1].status(node2, match, ignored, clean, unknown,
1899 return self[node1].status(node2, match, ignored, clean, unknown,
1900 listsubrepos)
1900 listsubrepos)
1901
1901
1902 def heads(self, start=None):
1902 def heads(self, start=None):
1903 if start is None:
1903 if start is None:
1904 cl = self.changelog
1904 cl = self.changelog
1905 headrevs = reversed(cl.headrevs())
1905 headrevs = reversed(cl.headrevs())
1906 return [cl.node(rev) for rev in headrevs]
1906 return [cl.node(rev) for rev in headrevs]
1907
1907
1908 heads = self.changelog.heads(start)
1908 heads = self.changelog.heads(start)
1909 # sort the output in rev descending order
1909 # sort the output in rev descending order
1910 return sorted(heads, key=self.changelog.rev, reverse=True)
1910 return sorted(heads, key=self.changelog.rev, reverse=True)
1911
1911
1912 def branchheads(self, branch=None, start=None, closed=False):
1912 def branchheads(self, branch=None, start=None, closed=False):
1913 '''return a (possibly filtered) list of heads for the given branch
1913 '''return a (possibly filtered) list of heads for the given branch
1914
1914
1915 Heads are returned in topological order, from newest to oldest.
1915 Heads are returned in topological order, from newest to oldest.
1916 If branch is None, use the dirstate branch.
1916 If branch is None, use the dirstate branch.
1917 If start is not None, return only heads reachable from start.
1917 If start is not None, return only heads reachable from start.
1918 If closed is True, return heads that are marked as closed as well.
1918 If closed is True, return heads that are marked as closed as well.
1919 '''
1919 '''
1920 if branch is None:
1920 if branch is None:
1921 branch = self[None].branch()
1921 branch = self[None].branch()
1922 branches = self.branchmap()
1922 branches = self.branchmap()
1923 if branch not in branches:
1923 if branch not in branches:
1924 return []
1924 return []
1925 # the cache returns heads ordered lowest to highest
1925 # the cache returns heads ordered lowest to highest
1926 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1926 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1927 if start is not None:
1927 if start is not None:
1928 # filter out the heads that cannot be reached from startrev
1928 # filter out the heads that cannot be reached from startrev
1929 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1929 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1930 bheads = [h for h in bheads if h in fbheads]
1930 bheads = [h for h in bheads if h in fbheads]
1931 return bheads
1931 return bheads
1932
1932
1933 def branches(self, nodes):
1933 def branches(self, nodes):
1934 if not nodes:
1934 if not nodes:
1935 nodes = [self.changelog.tip()]
1935 nodes = [self.changelog.tip()]
1936 b = []
1936 b = []
1937 for n in nodes:
1937 for n in nodes:
1938 t = n
1938 t = n
1939 while True:
1939 while True:
1940 p = self.changelog.parents(n)
1940 p = self.changelog.parents(n)
1941 if p[1] != nullid or p[0] == nullid:
1941 if p[1] != nullid or p[0] == nullid:
1942 b.append((t, n, p[0], p[1]))
1942 b.append((t, n, p[0], p[1]))
1943 break
1943 break
1944 n = p[0]
1944 n = p[0]
1945 return b
1945 return b
1946
1946
1947 def between(self, pairs):
1947 def between(self, pairs):
1948 r = []
1948 r = []
1949
1949
1950 for top, bottom in pairs:
1950 for top, bottom in pairs:
1951 n, l, i = top, [], 0
1951 n, l, i = top, [], 0
1952 f = 1
1952 f = 1
1953
1953
1954 while n != bottom and n != nullid:
1954 while n != bottom and n != nullid:
1955 p = self.changelog.parents(n)[0]
1955 p = self.changelog.parents(n)[0]
1956 if i == f:
1956 if i == f:
1957 l.append(n)
1957 l.append(n)
1958 f = f * 2
1958 f = f * 2
1959 n = p
1959 n = p
1960 i += 1
1960 i += 1
1961
1961
1962 r.append(l)
1962 r.append(l)
1963
1963
1964 return r
1964 return r
1965
1965
1966 def checkpush(self, pushop):
1966 def checkpush(self, pushop):
1967 """Extensions can override this function if additional checks have
1967 """Extensions can override this function if additional checks have
1968 to be performed before pushing, or call it if they override push
1968 to be performed before pushing, or call it if they override push
1969 command.
1969 command.
1970 """
1970 """
1971 pass
1971 pass
1972
1972
1973 @unfilteredpropertycache
1973 @unfilteredpropertycache
1974 def prepushoutgoinghooks(self):
1974 def prepushoutgoinghooks(self):
1975 """Return util.hooks consists of a pushop with repo, remote, outgoing
1975 """Return util.hooks consists of a pushop with repo, remote, outgoing
1976 methods, which are called before pushing changesets.
1976 methods, which are called before pushing changesets.
1977 """
1977 """
1978 return util.hooks()
1978 return util.hooks()
1979
1979
1980 def pushkey(self, namespace, key, old, new):
1980 def pushkey(self, namespace, key, old, new):
1981 try:
1981 try:
1982 tr = self.currenttransaction()
1982 tr = self.currenttransaction()
1983 hookargs = {}
1983 hookargs = {}
1984 if tr is not None:
1984 if tr is not None:
1985 hookargs.update(tr.hookargs)
1985 hookargs.update(tr.hookargs)
1986 hookargs['namespace'] = namespace
1986 hookargs['namespace'] = namespace
1987 hookargs['key'] = key
1987 hookargs['key'] = key
1988 hookargs['old'] = old
1988 hookargs['old'] = old
1989 hookargs['new'] = new
1989 hookargs['new'] = new
1990 self.hook('prepushkey', throw=True, **hookargs)
1990 self.hook('prepushkey', throw=True, **hookargs)
1991 except error.HookAbort as exc:
1991 except error.HookAbort as exc:
1992 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1992 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1993 if exc.hint:
1993 if exc.hint:
1994 self.ui.write_err(_("(%s)\n") % exc.hint)
1994 self.ui.write_err(_("(%s)\n") % exc.hint)
1995 return False
1995 return False
1996 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1996 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1997 ret = pushkey.push(self, namespace, key, old, new)
1997 ret = pushkey.push(self, namespace, key, old, new)
1998 def runhook():
1998 def runhook():
1999 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1999 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2000 ret=ret)
2000 ret=ret)
2001 self._afterlock(runhook)
2001 self._afterlock(runhook)
2002 return ret
2002 return ret
2003
2003
2004 def listkeys(self, namespace):
2004 def listkeys(self, namespace):
2005 self.hook('prelistkeys', throw=True, namespace=namespace)
2005 self.hook('prelistkeys', throw=True, namespace=namespace)
2006 self.ui.debug('listing keys for "%s"\n' % namespace)
2006 self.ui.debug('listing keys for "%s"\n' % namespace)
2007 values = pushkey.list(self, namespace)
2007 values = pushkey.list(self, namespace)
2008 self.hook('listkeys', namespace=namespace, values=values)
2008 self.hook('listkeys', namespace=namespace, values=values)
2009 return values
2009 return values
2010
2010
2011 def debugwireargs(self, one, two, three=None, four=None, five=None):
2011 def debugwireargs(self, one, two, three=None, four=None, five=None):
2012 '''used to test argument passing over the wire'''
2012 '''used to test argument passing over the wire'''
2013 return "%s %s %s %s %s" % (one, two, three, four, five)
2013 return "%s %s %s %s %s" % (one, two, three, four, five)
2014
2014
2015 def savecommitmessage(self, text):
2015 def savecommitmessage(self, text):
2016 fp = self.vfs('last-message.txt', 'wb')
2016 fp = self.vfs('last-message.txt', 'wb')
2017 try:
2017 try:
2018 fp.write(text)
2018 fp.write(text)
2019 finally:
2019 finally:
2020 fp.close()
2020 fp.close()
2021 return self.pathto(fp.name[len(self.root) + 1:])
2021 return self.pathto(fp.name[len(self.root) + 1:])
2022
2022
2023 # used to avoid circular references so destructors work
2023 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming (vfs, src, dest) entries after a transaction.

    The entries are copied eagerly so later mutation of *files* has no
    effect on the callback.
    """
    renames = [tuple(entry) for entry in files]
    def a():
        for vfs, src, dest in renames:
            # If src and dest refer to the same file, vfs.rename is a
            # no-op that leaves both on disk; unlink dest first so the
            # rename can never be such a no-op.
            try:
                vfs.unlink(dest)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return a
2041
2041
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # only the leading 'journal' is rewritten
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2046
2046
def instance(ui, path, create):
    """Open (or create) the localrepository designated by *path*."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2049
2049
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
2052
2052
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}

    # boolean format options that map directly to a requirement
    boolopts = [('usestore', 'store'),
                ('usefncache', 'fncache'),
                ('dotencode', 'dotencode')]
    for opt, req in boolopts:
        if ui.configbool('format', opt, True):
            requirements.add(req)

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))
    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
General Comments 0
You need to be logged in to leave comments. Login now