##// END OF EJS Templates
localrepo: directly use repo.vfs.join...
Pierre-Yves David -
r31319:42a71955 default
parent child Browse files
Show More
@@ -1,2075 +1,2075 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 store,
56 store,
57 subrepo,
57 subrepo,
58 tags as tagsmod,
58 tags as tagsmod,
59 transaction,
59 transaction,
60 txnutil,
60 txnutil,
61 util,
61 util,
62 vfs as vfsmod,
62 vfs as vfsmod,
63 )
63 )
64
64
65 release = lockmod.release
65 release = lockmod.release
66 urlerr = util.urlerr
66 urlerr = util.urlerr
67 urlreq = util.urlreq
67 urlreq = util.urlreq
68
68
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg directory
        return obj.vfs.join(fname)
    def __get__(self, repo, type=None):
        # class-level access (repo is None) returns the descriptor itself
        if repo is None:
            return self
        # always read/compute the cached value on the unfiltered repo so
        # every filtered view shares one copy
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
83
83
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store (or a shared store), so join
        # through the store rather than the plain .hg vfs
        return obj.sjoin(fname)
88
88
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # already unfiltered: compute and cache the value here
            return super(unfilteredpropertycache, self).__get__(unfi)
        # filtered view: delegate to the attribute cached on the
        # unfiltered repo so only one shared value exists
        return getattr(unfi, self.name)
97
97
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly into the instance dict, bypassing any
        # __setattr__ override on the repo class
        object.__setattr__(obj, self.name, value)
103
103
104
104
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    # only instance attributes count: a cached value lives in __dict__
    return name in vars(unfi)
108
108
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered variant
        # before invoking the wrapped method
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
114
114
# capabilities advertised by a modern local peer; legacy peers additionally
# support the pre-bundle 'changegroupsubset' protocol command
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})
118
118
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # peers operate on the 'served' filtered view so hidden/secret
        # changesets are not exposed over the peer API
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # re-raise the original exception after salvaged output
                # has been shown to the user
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
225
225
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (adds 'changegroupsubset')
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
244
244
245 class localrepository(object):
245 class localrepository(object):
246
246
247 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
247 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
248 'manifestv2'))
248 'manifestv2'))
249 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
249 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
250 'relshared', 'dotencode'))
250 'relshared', 'dotencode'))
251 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
251 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
252 filtername = None
252 filtername = None
253
253
254 # a list of (ui, featureset) functions.
254 # a list of (ui, featureset) functions.
255 # only functions defined in module of enabled extensions are invoked
255 # only functions defined in module of enabled extensions are invoked
256 featuresetupfuncs = set()
256 featuresetupfuncs = set()
257
257
    def __init__(self, baseui, path, create=False):
        """Open (or, with create=True, initialize) the repository at path."""
        self.requirements = set()
        # vfs to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs to access the content of the repository
        self.vfs = None
        # vfs to access the store part of the repository
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # per-repo config may enable extensions, so load them right
            # after reading it; a missing hgrc is not an error
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions belonging to enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a missing requires file means an old-style repo;
                # anything else is a real error
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                # sharedpath is stored relative to .hg in that case
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
382
382
    @property
    def wopener(self):
        """Deprecated alias for the working-directory vfs; use ``wvfs``."""
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
        return self.wvfs
387
387
    @property
    def opener(self):
        """Deprecated alias for the .hg vfs; use ``vfs``."""
        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
        return self.vfs
392
392
    def close(self):
        # flush any in-memory caches to disk before the repo goes away
        self._writecaches()
395
395
    def _loadextensions(self):
        # load every extension enabled by this repo's configuration
        extensions.loadall(self.ui)
398
398
    def _writecaches(self):
        if self._revbranchcache:
            # only persisted when the cache was actually loaded/populated
            self._revbranchcache.write()
402
402
    def _restrictcapabilities(self, caps):
        """Return the capability set to advertise for this repo."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            # the blob is URL-quoted so it stays safe on the wire
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
409
409
    def _applyopenerreqs(self):
        """Propagate requirement-derived options onto the store vfs."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        # an 'exp-compression-*' requirement selects the revlog compression
        # engine by name
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]
434
434
    def _writerequirements(self):
        # persist the current requirement set to .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
437
437
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is a subrepo itself: legal
                    return True
                else:
                    # path is inside a subrepo; let the subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory level and retry
                parts.pop()
        return False
475
475
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle
478
478
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
484
484
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
492
492
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # invalidated whenever either bookmark file changes on disk
        return bookmarks.bmstore(self)
496
496
    @property
    def _activebookmark(self):
        """The currently active bookmark, if any."""
        return self._bookmarks.active
500
500
501 def bookmarkheads(self, bookmark):
501 def bookmarkheads(self, bookmark):
502 name = bookmark.split('@', 1)[0]
502 name = bookmark.split('@', 1)[0]
503 heads = []
503 heads = []
504 for mark, n in self._bookmarks.iteritems():
504 for mark, n in self._bookmarks.iteritems():
505 if mark.split('@', 1)[0] == name:
505 if mark.split('@', 1)[0] == name:
506 heads.append(n)
506 heads.append(n)
507 return heads
507 return heads
508
508
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # cache of phase information, rebuilt when either file changes
        return phases.phasecache(self, self._phasedefaults)
515
515
516 @storecache('obsstore')
516 @storecache('obsstore')
517 def obsstore(self):
517 def obsstore(self):
518 # read default format for new obsstore.
518 # read default format for new obsstore.
519 # developer config: format.obsstore-version
519 # developer config: format.obsstore-version
520 defaultformat = self.ui.configint('format', 'obsstore-version', None)
520 defaultformat = self.ui.configint('format', 'obsstore-version', None)
521 # rely on obsstore class default when possible.
521 # rely on obsstore class default when possible.
522 kwargs = {}
522 kwargs = {}
523 if defaultformat is not None:
523 if defaultformat is not None:
524 kwargs['defaultformat'] = defaultformat
524 kwargs['defaultformat'] = defaultformat
525 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
525 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
526 store = obsolete.obsstore(self.svfs, readonly=readonly,
526 store = obsolete.obsstore(self.svfs, readonly=readonly,
527 **kwargs)
527 **kwargs)
528 if store and readonly:
528 if store and readonly:
529 self.ui.warn(
529 self.ui.warn(
530 _('obsolete feature not enabled but %i markers found!\n')
530 _('obsolete feature not enabled but %i markers found!\n')
531 % len(list(store)))
531 % len(list(store)))
532 return store
532 return store
533
533
534 @storecache('00changelog.i')
534 @storecache('00changelog.i')
535 def changelog(self):
535 def changelog(self):
536 c = changelog.changelog(self.svfs)
536 c = changelog.changelog(self.svfs)
537 if txnutil.mayhavepending(self.root):
537 if txnutil.mayhavepending(self.root):
538 c.readpending('00changelog.i.a')
538 c.readpending('00changelog.i.a')
539 return c
539 return c
540
540
541 def _constructmanifest(self):
541 def _constructmanifest(self):
542 # This is a temporary function while we migrate from manifest to
542 # This is a temporary function while we migrate from manifest to
543 # manifestlog. It allows bundlerepo and unionrepo to intercept the
543 # manifestlog. It allows bundlerepo and unionrepo to intercept the
544 # manifest creation.
544 # manifest creation.
545 return manifest.manifestrevlog(self.svfs)
545 return manifest.manifestrevlog(self.svfs)
546
546
547 @storecache('00manifest.i')
547 @storecache('00manifest.i')
548 def manifestlog(self):
548 def manifestlog(self):
549 return manifest.manifestlog(self.svfs, self)
549 return manifest.manifestlog(self.svfs, self)
550
550
551 @repofilecache('dirstate')
551 @repofilecache('dirstate')
552 def dirstate(self):
552 def dirstate(self):
553 return dirstate.dirstate(self.vfs, self.ui, self.root,
553 return dirstate.dirstate(self.vfs, self.ui, self.root,
554 self._dirstatevalidate)
554 self._dirstatevalidate)
555
555
556 def _dirstatevalidate(self, node):
556 def _dirstatevalidate(self, node):
557 try:
557 try:
558 self.changelog.rev(node)
558 self.changelog.rev(node)
559 return node
559 return node
560 except error.LookupError:
560 except error.LookupError:
561 if not self._dirstatevalidatewarned:
561 if not self._dirstatevalidatewarned:
562 self._dirstatevalidatewarned = True
562 self._dirstatevalidatewarned = True
563 self.ui.warn(_("warning: ignoring unknown"
563 self.ui.warn(_("warning: ignoring unknown"
564 " working parent %s!\n") % short(node))
564 " working parent %s!\n") % short(node))
565 return nullid
565 return nullid
566
566
567 def __getitem__(self, changeid):
567 def __getitem__(self, changeid):
568 if changeid is None or changeid == wdirrev:
568 if changeid is None or changeid == wdirrev:
569 return context.workingctx(self)
569 return context.workingctx(self)
570 if isinstance(changeid, slice):
570 if isinstance(changeid, slice):
571 return [context.changectx(self, i)
571 return [context.changectx(self, i)
572 for i in xrange(*changeid.indices(len(self)))
572 for i in xrange(*changeid.indices(len(self)))
573 if i not in self.changelog.filteredrevs]
573 if i not in self.changelog.filteredrevs]
574 return context.changectx(self, changeid)
574 return context.changectx(self, changeid)
575
575
576 def __contains__(self, changeid):
576 def __contains__(self, changeid):
577 try:
577 try:
578 self[changeid]
578 self[changeid]
579 return True
579 return True
580 except error.RepoLookupError:
580 except error.RepoLookupError:
581 return False
581 return False
582
582
583 def __nonzero__(self):
583 def __nonzero__(self):
584 return True
584 return True
585
585
586 def __len__(self):
586 def __len__(self):
587 return len(self.changelog)
587 return len(self.changelog)
588
588
589 def __iter__(self):
589 def __iter__(self):
590 return iter(self.changelog)
590 return iter(self.changelog)
591
591
592 def revs(self, expr, *args):
592 def revs(self, expr, *args):
593 '''Find revisions matching a revset.
593 '''Find revisions matching a revset.
594
594
595 The revset is specified as a string ``expr`` that may contain
595 The revset is specified as a string ``expr`` that may contain
596 %-formatting to escape certain types. See ``revsetlang.formatspec``.
596 %-formatting to escape certain types. See ``revsetlang.formatspec``.
597
597
598 Revset aliases from the configuration are not expanded. To expand
598 Revset aliases from the configuration are not expanded. To expand
599 user aliases, consider calling ``scmutil.revrange()`` or
599 user aliases, consider calling ``scmutil.revrange()`` or
600 ``repo.anyrevs([expr], user=True)``.
600 ``repo.anyrevs([expr], user=True)``.
601
601
602 Returns a revset.abstractsmartset, which is a list-like interface
602 Returns a revset.abstractsmartset, which is a list-like interface
603 that contains integer revisions.
603 that contains integer revisions.
604 '''
604 '''
605 expr = revsetlang.formatspec(expr, *args)
605 expr = revsetlang.formatspec(expr, *args)
606 m = revset.match(None, expr)
606 m = revset.match(None, expr)
607 return m(self)
607 return m(self)
608
608
609 def set(self, expr, *args):
609 def set(self, expr, *args):
610 '''Find revisions matching a revset and emit changectx instances.
610 '''Find revisions matching a revset and emit changectx instances.
611
611
612 This is a convenience wrapper around ``revs()`` that iterates the
612 This is a convenience wrapper around ``revs()`` that iterates the
613 result and is a generator of changectx instances.
613 result and is a generator of changectx instances.
614
614
615 Revset aliases from the configuration are not expanded. To expand
615 Revset aliases from the configuration are not expanded. To expand
616 user aliases, consider calling ``scmutil.revrange()``.
616 user aliases, consider calling ``scmutil.revrange()``.
617 '''
617 '''
618 for r in self.revs(expr, *args):
618 for r in self.revs(expr, *args):
619 yield self[r]
619 yield self[r]
620
620
621 def anyrevs(self, specs, user=False):
621 def anyrevs(self, specs, user=False):
622 '''Find revisions matching one of the given revsets.
622 '''Find revisions matching one of the given revsets.
623
623
624 Revset aliases from the configuration are not expanded by default. To
624 Revset aliases from the configuration are not expanded by default. To
625 expand user aliases, specify ``user=True``.
625 expand user aliases, specify ``user=True``.
626 '''
626 '''
627 if user:
627 if user:
628 m = revset.matchany(self.ui, specs, repo=self)
628 m = revset.matchany(self.ui, specs, repo=self)
629 else:
629 else:
630 m = revset.matchany(None, specs)
630 m = revset.matchany(None, specs)
631 return m(self)
631 return m(self)
632
632
633 def url(self):
633 def url(self):
634 return 'file:' + self.root
634 return 'file:' + self.root
635
635
636 def hook(self, name, throw=False, **args):
636 def hook(self, name, throw=False, **args):
637 """Call a hook, passing this repo instance.
637 """Call a hook, passing this repo instance.
638
638
639 This a convenience method to aid invoking hooks. Extensions likely
639 This a convenience method to aid invoking hooks. Extensions likely
640 won't call this unless they have registered a custom hook or are
640 won't call this unless they have registered a custom hook or are
641 replacing code that is expected to call a hook.
641 replacing code that is expected to call a hook.
642 """
642 """
643 return hook.hook(self.ui, self, name, throw, **args)
643 return hook.hook(self.ui, self, name, throw, **args)
644
644
645 @unfilteredmethod
645 @unfilteredmethod
646 def _tag(self, names, node, message, local, user, date, extra=None,
646 def _tag(self, names, node, message, local, user, date, extra=None,
647 editor=False):
647 editor=False):
648 if isinstance(names, str):
648 if isinstance(names, str):
649 names = (names,)
649 names = (names,)
650
650
651 branches = self.branchmap()
651 branches = self.branchmap()
652 for name in names:
652 for name in names:
653 self.hook('pretag', throw=True, node=hex(node), tag=name,
653 self.hook('pretag', throw=True, node=hex(node), tag=name,
654 local=local)
654 local=local)
655 if name in branches:
655 if name in branches:
656 self.ui.warn(_("warning: tag %s conflicts with existing"
656 self.ui.warn(_("warning: tag %s conflicts with existing"
657 " branch name\n") % name)
657 " branch name\n") % name)
658
658
659 def writetags(fp, names, munge, prevtags):
659 def writetags(fp, names, munge, prevtags):
660 fp.seek(0, 2)
660 fp.seek(0, 2)
661 if prevtags and prevtags[-1] != '\n':
661 if prevtags and prevtags[-1] != '\n':
662 fp.write('\n')
662 fp.write('\n')
663 for name in names:
663 for name in names:
664 if munge:
664 if munge:
665 m = munge(name)
665 m = munge(name)
666 else:
666 else:
667 m = name
667 m = name
668
668
669 if (self._tagscache.tagtypes and
669 if (self._tagscache.tagtypes and
670 name in self._tagscache.tagtypes):
670 name in self._tagscache.tagtypes):
671 old = self.tags().get(name, nullid)
671 old = self.tags().get(name, nullid)
672 fp.write('%s %s\n' % (hex(old), m))
672 fp.write('%s %s\n' % (hex(old), m))
673 fp.write('%s %s\n' % (hex(node), m))
673 fp.write('%s %s\n' % (hex(node), m))
674 fp.close()
674 fp.close()
675
675
676 prevtags = ''
676 prevtags = ''
677 if local:
677 if local:
678 try:
678 try:
679 fp = self.vfs('localtags', 'r+')
679 fp = self.vfs('localtags', 'r+')
680 except IOError:
680 except IOError:
681 fp = self.vfs('localtags', 'a')
681 fp = self.vfs('localtags', 'a')
682 else:
682 else:
683 prevtags = fp.read()
683 prevtags = fp.read()
684
684
685 # local tags are stored in the current charset
685 # local tags are stored in the current charset
686 writetags(fp, names, None, prevtags)
686 writetags(fp, names, None, prevtags)
687 for name in names:
687 for name in names:
688 self.hook('tag', node=hex(node), tag=name, local=local)
688 self.hook('tag', node=hex(node), tag=name, local=local)
689 return
689 return
690
690
691 try:
691 try:
692 fp = self.wfile('.hgtags', 'rb+')
692 fp = self.wfile('.hgtags', 'rb+')
693 except IOError as e:
693 except IOError as e:
694 if e.errno != errno.ENOENT:
694 if e.errno != errno.ENOENT:
695 raise
695 raise
696 fp = self.wfile('.hgtags', 'ab')
696 fp = self.wfile('.hgtags', 'ab')
697 else:
697 else:
698 prevtags = fp.read()
698 prevtags = fp.read()
699
699
700 # committed tags are stored in UTF-8
700 # committed tags are stored in UTF-8
701 writetags(fp, names, encoding.fromlocal, prevtags)
701 writetags(fp, names, encoding.fromlocal, prevtags)
702
702
703 fp.close()
703 fp.close()
704
704
705 self.invalidatecaches()
705 self.invalidatecaches()
706
706
707 if '.hgtags' not in self.dirstate:
707 if '.hgtags' not in self.dirstate:
708 self[None].add(['.hgtags'])
708 self[None].add(['.hgtags'])
709
709
710 m = matchmod.exact(self.root, '', ['.hgtags'])
710 m = matchmod.exact(self.root, '', ['.hgtags'])
711 tagnode = self.commit(message, user, date, extra=extra, match=m,
711 tagnode = self.commit(message, user, date, extra=extra, match=m,
712 editor=editor)
712 editor=editor)
713
713
714 for name in names:
714 for name in names:
715 self.hook('tag', node=hex(node), tag=name, local=local)
715 self.hook('tag', node=hex(node), tag=name, local=local)
716
716
717 return tagnode
717 return tagnode
718
718
719 def tag(self, names, node, message, local, user, date, editor=False):
719 def tag(self, names, node, message, local, user, date, editor=False):
720 '''tag a revision with one or more symbolic names.
720 '''tag a revision with one or more symbolic names.
721
721
722 names is a list of strings or, when adding a single tag, names may be a
722 names is a list of strings or, when adding a single tag, names may be a
723 string.
723 string.
724
724
725 if local is True, the tags are stored in a per-repository file.
725 if local is True, the tags are stored in a per-repository file.
726 otherwise, they are stored in the .hgtags file, and a new
726 otherwise, they are stored in the .hgtags file, and a new
727 changeset is committed with the change.
727 changeset is committed with the change.
728
728
729 keyword arguments:
729 keyword arguments:
730
730
731 local: whether to store tags in non-version-controlled file
731 local: whether to store tags in non-version-controlled file
732 (default False)
732 (default False)
733
733
734 message: commit message to use if committing
734 message: commit message to use if committing
735
735
736 user: name of user to use if committing
736 user: name of user to use if committing
737
737
738 date: date tuple to use if committing'''
738 date: date tuple to use if committing'''
739
739
740 if not local:
740 if not local:
741 m = matchmod.exact(self.root, '', ['.hgtags'])
741 m = matchmod.exact(self.root, '', ['.hgtags'])
742 if any(self.status(match=m, unknown=True, ignored=True)):
742 if any(self.status(match=m, unknown=True, ignored=True)):
743 raise error.Abort(_('working copy of .hgtags is changed'),
743 raise error.Abort(_('working copy of .hgtags is changed'),
744 hint=_('please commit .hgtags manually'))
744 hint=_('please commit .hgtags manually'))
745
745
746 self.tags() # instantiate the cache
746 self.tags() # instantiate the cache
747 self._tag(names, node, message, local, user, date, editor=editor)
747 self._tag(names, node, message, local, user, date, editor=editor)
748
748
749 @filteredpropertycache
749 @filteredpropertycache
750 def _tagscache(self):
750 def _tagscache(self):
751 '''Returns a tagscache object that contains various tags related
751 '''Returns a tagscache object that contains various tags related
752 caches.'''
752 caches.'''
753
753
754 # This simplifies its cache management by having one decorated
754 # This simplifies its cache management by having one decorated
755 # function (this one) and the rest simply fetch things from it.
755 # function (this one) and the rest simply fetch things from it.
756 class tagscache(object):
756 class tagscache(object):
757 def __init__(self):
757 def __init__(self):
758 # These two define the set of tags for this repository. tags
758 # These two define the set of tags for this repository. tags
759 # maps tag name to node; tagtypes maps tag name to 'global' or
759 # maps tag name to node; tagtypes maps tag name to 'global' or
760 # 'local'. (Global tags are defined by .hgtags across all
760 # 'local'. (Global tags are defined by .hgtags across all
761 # heads, and local tags are defined in .hg/localtags.)
761 # heads, and local tags are defined in .hg/localtags.)
762 # They constitute the in-memory cache of tags.
762 # They constitute the in-memory cache of tags.
763 self.tags = self.tagtypes = None
763 self.tags = self.tagtypes = None
764
764
765 self.nodetagscache = self.tagslist = None
765 self.nodetagscache = self.tagslist = None
766
766
767 cache = tagscache()
767 cache = tagscache()
768 cache.tags, cache.tagtypes = self._findtags()
768 cache.tags, cache.tagtypes = self._findtags()
769
769
770 return cache
770 return cache
771
771
772 def tags(self):
772 def tags(self):
773 '''return a mapping of tag to node'''
773 '''return a mapping of tag to node'''
774 t = {}
774 t = {}
775 if self.changelog.filteredrevs:
775 if self.changelog.filteredrevs:
776 tags, tt = self._findtags()
776 tags, tt = self._findtags()
777 else:
777 else:
778 tags = self._tagscache.tags
778 tags = self._tagscache.tags
779 for k, v in tags.iteritems():
779 for k, v in tags.iteritems():
780 try:
780 try:
781 # ignore tags to unknown nodes
781 # ignore tags to unknown nodes
782 self.changelog.rev(v)
782 self.changelog.rev(v)
783 t[k] = v
783 t[k] = v
784 except (error.LookupError, ValueError):
784 except (error.LookupError, ValueError):
785 pass
785 pass
786 return t
786 return t
787
787
788 def _findtags(self):
788 def _findtags(self):
789 '''Do the hard work of finding tags. Return a pair of dicts
789 '''Do the hard work of finding tags. Return a pair of dicts
790 (tags, tagtypes) where tags maps tag name to node, and tagtypes
790 (tags, tagtypes) where tags maps tag name to node, and tagtypes
791 maps tag name to a string like \'global\' or \'local\'.
791 maps tag name to a string like \'global\' or \'local\'.
792 Subclasses or extensions are free to add their own tags, but
792 Subclasses or extensions are free to add their own tags, but
793 should be aware that the returned dicts will be retained for the
793 should be aware that the returned dicts will be retained for the
794 duration of the localrepo object.'''
794 duration of the localrepo object.'''
795
795
796 # XXX what tagtype should subclasses/extensions use? Currently
796 # XXX what tagtype should subclasses/extensions use? Currently
797 # mq and bookmarks add tags, but do not set the tagtype at all.
797 # mq and bookmarks add tags, but do not set the tagtype at all.
798 # Should each extension invent its own tag type? Should there
798 # Should each extension invent its own tag type? Should there
799 # be one tagtype for all such "virtual" tags? Or is the status
799 # be one tagtype for all such "virtual" tags? Or is the status
800 # quo fine?
800 # quo fine?
801
801
802 alltags = {} # map tag name to (node, hist)
802 alltags = {} # map tag name to (node, hist)
803 tagtypes = {}
803 tagtypes = {}
804
804
805 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
805 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
806 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
806 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
807
807
808 # Build the return dicts. Have to re-encode tag names because
808 # Build the return dicts. Have to re-encode tag names because
809 # the tags module always uses UTF-8 (in order not to lose info
809 # the tags module always uses UTF-8 (in order not to lose info
810 # writing to the cache), but the rest of Mercurial wants them in
810 # writing to the cache), but the rest of Mercurial wants them in
811 # local encoding.
811 # local encoding.
812 tags = {}
812 tags = {}
813 for (name, (node, hist)) in alltags.iteritems():
813 for (name, (node, hist)) in alltags.iteritems():
814 if node != nullid:
814 if node != nullid:
815 tags[encoding.tolocal(name)] = node
815 tags[encoding.tolocal(name)] = node
816 tags['tip'] = self.changelog.tip()
816 tags['tip'] = self.changelog.tip()
817 tagtypes = dict([(encoding.tolocal(name), value)
817 tagtypes = dict([(encoding.tolocal(name), value)
818 for (name, value) in tagtypes.iteritems()])
818 for (name, value) in tagtypes.iteritems()])
819 return (tags, tagtypes)
819 return (tags, tagtypes)
820
820
821 def tagtype(self, tagname):
821 def tagtype(self, tagname):
822 '''
822 '''
823 return the type of the given tag. result can be:
823 return the type of the given tag. result can be:
824
824
825 'local' : a local tag
825 'local' : a local tag
826 'global' : a global tag
826 'global' : a global tag
827 None : tag does not exist
827 None : tag does not exist
828 '''
828 '''
829
829
830 return self._tagscache.tagtypes.get(tagname)
830 return self._tagscache.tagtypes.get(tagname)
831
831
832 def tagslist(self):
832 def tagslist(self):
833 '''return a list of tags ordered by revision'''
833 '''return a list of tags ordered by revision'''
834 if not self._tagscache.tagslist:
834 if not self._tagscache.tagslist:
835 l = []
835 l = []
836 for t, n in self.tags().iteritems():
836 for t, n in self.tags().iteritems():
837 l.append((self.changelog.rev(n), t, n))
837 l.append((self.changelog.rev(n), t, n))
838 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
838 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
839
839
840 return self._tagscache.tagslist
840 return self._tagscache.tagslist
841
841
842 def nodetags(self, node):
842 def nodetags(self, node):
843 '''return the tags associated with a node'''
843 '''return the tags associated with a node'''
844 if not self._tagscache.nodetagscache:
844 if not self._tagscache.nodetagscache:
845 nodetagscache = {}
845 nodetagscache = {}
846 for t, n in self._tagscache.tags.iteritems():
846 for t, n in self._tagscache.tags.iteritems():
847 nodetagscache.setdefault(n, []).append(t)
847 nodetagscache.setdefault(n, []).append(t)
848 for tags in nodetagscache.itervalues():
848 for tags in nodetagscache.itervalues():
849 tags.sort()
849 tags.sort()
850 self._tagscache.nodetagscache = nodetagscache
850 self._tagscache.nodetagscache = nodetagscache
851 return self._tagscache.nodetagscache.get(node, [])
851 return self._tagscache.nodetagscache.get(node, [])
852
852
853 def nodebookmarks(self, node):
853 def nodebookmarks(self, node):
854 """return the list of bookmarks pointing to the specified node"""
854 """return the list of bookmarks pointing to the specified node"""
855 marks = []
855 marks = []
856 for bookmark, n in self._bookmarks.iteritems():
856 for bookmark, n in self._bookmarks.iteritems():
857 if n == node:
857 if n == node:
858 marks.append(bookmark)
858 marks.append(bookmark)
859 return sorted(marks)
859 return sorted(marks)
860
860
861 def branchmap(self):
861 def branchmap(self):
862 '''returns a dictionary {branch: [branchheads]} with branchheads
862 '''returns a dictionary {branch: [branchheads]} with branchheads
863 ordered by increasing revision number'''
863 ordered by increasing revision number'''
864 branchmap.updatecache(self)
864 branchmap.updatecache(self)
865 return self._branchcaches[self.filtername]
865 return self._branchcaches[self.filtername]
866
866
867 @unfilteredmethod
867 @unfilteredmethod
868 def revbranchcache(self):
868 def revbranchcache(self):
869 if not self._revbranchcache:
869 if not self._revbranchcache:
870 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
870 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
871 return self._revbranchcache
871 return self._revbranchcache
872
872
873 def branchtip(self, branch, ignoremissing=False):
873 def branchtip(self, branch, ignoremissing=False):
874 '''return the tip node for a given branch
874 '''return the tip node for a given branch
875
875
876 If ignoremissing is True, then this method will not raise an error.
876 If ignoremissing is True, then this method will not raise an error.
877 This is helpful for callers that only expect None for a missing branch
877 This is helpful for callers that only expect None for a missing branch
878 (e.g. namespace).
878 (e.g. namespace).
879
879
880 '''
880 '''
881 try:
881 try:
882 return self.branchmap().branchtip(branch)
882 return self.branchmap().branchtip(branch)
883 except KeyError:
883 except KeyError:
884 if not ignoremissing:
884 if not ignoremissing:
885 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
885 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
886 else:
886 else:
887 pass
887 pass
888
888
889 def lookup(self, key):
889 def lookup(self, key):
890 return self[key].node()
890 return self[key].node()
891
891
892 def lookupbranch(self, key, remote=None):
892 def lookupbranch(self, key, remote=None):
893 repo = remote or self
893 repo = remote or self
894 if key in repo.branchmap():
894 if key in repo.branchmap():
895 return key
895 return key
896
896
897 repo = (remote and remote.local()) and remote or self
897 repo = (remote and remote.local()) and remote or self
898 return repo[key].branch()
898 return repo[key].branch()
899
899
900 def known(self, nodes):
900 def known(self, nodes):
901 cl = self.changelog
901 cl = self.changelog
902 nm = cl.nodemap
902 nm = cl.nodemap
903 filtered = cl.filteredrevs
903 filtered = cl.filteredrevs
904 result = []
904 result = []
905 for n in nodes:
905 for n in nodes:
906 r = nm.get(n)
906 r = nm.get(n)
907 resp = not (r is None or r in filtered)
907 resp = not (r is None or r in filtered)
908 result.append(resp)
908 result.append(resp)
909 return result
909 return result
910
910
911 def local(self):
911 def local(self):
912 return self
912 return self
913
913
914 def publishing(self):
914 def publishing(self):
915 # it's safe (and desirable) to trust the publish flag unconditionally
915 # it's safe (and desirable) to trust the publish flag unconditionally
916 # so that we don't finalize changes shared between users via ssh or nfs
916 # so that we don't finalize changes shared between users via ssh or nfs
917 return self.ui.configbool('phases', 'publish', True, untrusted=True)
917 return self.ui.configbool('phases', 'publish', True, untrusted=True)
918
918
919 def cancopy(self):
919 def cancopy(self):
920 # so statichttprepo's override of local() works
920 # so statichttprepo's override of local() works
921 if not self.local():
921 if not self.local():
922 return False
922 return False
923 if not self.publishing():
923 if not self.publishing():
924 return True
924 return True
925 # if publishing we can't copy if there is filtered content
925 # if publishing we can't copy if there is filtered content
926 return not self.filtered('visible').changelog.filteredrevs
926 return not self.filtered('visible').changelog.filteredrevs
927
927
928 def shared(self):
928 def shared(self):
929 '''the type of shared repository (None if not shared)'''
929 '''the type of shared repository (None if not shared)'''
930 if self.sharedpath != self.path:
930 if self.sharedpath != self.path:
931 return 'store'
931 return 'store'
932 return None
932 return None
933
933
934 def join(self, f, *insidef):
934 def join(self, f, *insidef):
935 return self.vfs.join(os.path.join(f, *insidef))
935 return self.vfs.join(os.path.join(f, *insidef))
936
936
937 def wjoin(self, f, *insidef):
937 def wjoin(self, f, *insidef):
938 return self.vfs.reljoin(self.root, f, *insidef)
938 return self.vfs.reljoin(self.root, f, *insidef)
939
939
940 def file(self, f):
940 def file(self, f):
941 if f[0] == '/':
941 if f[0] == '/':
942 f = f[1:]
942 f = f[1:]
943 return filelog.filelog(self.svfs, f)
943 return filelog.filelog(self.svfs, f)
944
944
945 def changectx(self, changeid):
945 def changectx(self, changeid):
946 return self[changeid]
946 return self[changeid]
947
947
948 def setparents(self, p1, p2=nullid):
948 def setparents(self, p1, p2=nullid):
949 self.dirstate.beginparentchange()
949 self.dirstate.beginparentchange()
950 copies = self.dirstate.setparents(p1, p2)
950 copies = self.dirstate.setparents(p1, p2)
951 pctx = self[p1]
951 pctx = self[p1]
952 if copies:
952 if copies:
953 # Adjust copy records, the dirstate cannot do it, it
953 # Adjust copy records, the dirstate cannot do it, it
954 # requires access to parents manifests. Preserve them
954 # requires access to parents manifests. Preserve them
955 # only for entries added to first parent.
955 # only for entries added to first parent.
956 for f in copies:
956 for f in copies:
957 if f not in pctx and copies[f] in pctx:
957 if f not in pctx and copies[f] in pctx:
958 self.dirstate.copy(copies[f], f)
958 self.dirstate.copy(copies[f], f)
959 if p2 == nullid:
959 if p2 == nullid:
960 for f, s in sorted(self.dirstate.copies().items()):
960 for f, s in sorted(self.dirstate.copies().items()):
961 if f not in pctx and s not in pctx:
961 if f not in pctx and s not in pctx:
962 self.dirstate.copy(None, f)
962 self.dirstate.copy(None, f)
963 self.dirstate.endparentchange()
963 self.dirstate.endparentchange()
964
964
965 def filectx(self, path, changeid=None, fileid=None):
965 def filectx(self, path, changeid=None, fileid=None):
966 """changeid can be a changeset revision, node, or tag.
966 """changeid can be a changeset revision, node, or tag.
967 fileid can be a file revision or node."""
967 fileid can be a file revision or node."""
968 return context.filectx(self, path, changeid, fileid)
968 return context.filectx(self, path, changeid, fileid)
969
969
970 def getcwd(self):
970 def getcwd(self):
971 return self.dirstate.getcwd()
971 return self.dirstate.getcwd()
972
972
973 def pathto(self, f, cwd=None):
973 def pathto(self, f, cwd=None):
974 return self.dirstate.pathto(f, cwd)
974 return self.dirstate.pathto(f, cwd)
975
975
976 def wfile(self, f, mode='r'):
976 def wfile(self, f, mode='r'):
977 return self.wvfs(f, mode)
977 return self.wvfs(f, mode)
978
978
979 def _link(self, f):
979 def _link(self, f):
980 return self.wvfs.islink(f)
980 return self.wvfs.islink(f)
981
981
982 def _loadfilter(self, filter):
982 def _loadfilter(self, filter):
983 if filter not in self.filterpats:
983 if filter not in self.filterpats:
984 l = []
984 l = []
985 for pat, cmd in self.ui.configitems(filter):
985 for pat, cmd in self.ui.configitems(filter):
986 if cmd == '!':
986 if cmd == '!':
987 continue
987 continue
988 mf = matchmod.match(self.root, '', [pat])
988 mf = matchmod.match(self.root, '', [pat])
989 fn = None
989 fn = None
990 params = cmd
990 params = cmd
991 for name, filterfn in self._datafilters.iteritems():
991 for name, filterfn in self._datafilters.iteritems():
992 if cmd.startswith(name):
992 if cmd.startswith(name):
993 fn = filterfn
993 fn = filterfn
994 params = cmd[len(name):].lstrip()
994 params = cmd[len(name):].lstrip()
995 break
995 break
996 if not fn:
996 if not fn:
997 fn = lambda s, c, **kwargs: util.filter(s, c)
997 fn = lambda s, c, **kwargs: util.filter(s, c)
998 # Wrap old filters not supporting keyword arguments
998 # Wrap old filters not supporting keyword arguments
999 if not inspect.getargspec(fn)[2]:
999 if not inspect.getargspec(fn)[2]:
1000 oldfn = fn
1000 oldfn = fn
1001 fn = lambda s, c, **kwargs: oldfn(s, c)
1001 fn = lambda s, c, **kwargs: oldfn(s, c)
1002 l.append((mf, fn, params))
1002 l.append((mf, fn, params))
1003 self.filterpats[filter] = l
1003 self.filterpats[filter] = l
1004 return self.filterpats[filter]
1004 return self.filterpats[filter]
1005
1005
1006 def _filter(self, filterpats, filename, data):
1006 def _filter(self, filterpats, filename, data):
1007 for mf, fn, cmd in filterpats:
1007 for mf, fn, cmd in filterpats:
1008 if mf(filename):
1008 if mf(filename):
1009 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1009 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1010 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1010 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1011 break
1011 break
1012
1012
1013 return data
1013 return data
1014
1014
1015 @unfilteredpropertycache
1015 @unfilteredpropertycache
1016 def _encodefilterpats(self):
1016 def _encodefilterpats(self):
1017 return self._loadfilter('encode')
1017 return self._loadfilter('encode')
1018
1018
1019 @unfilteredpropertycache
1019 @unfilteredpropertycache
1020 def _decodefilterpats(self):
1020 def _decodefilterpats(self):
1021 return self._loadfilter('decode')
1021 return self._loadfilter('decode')
1022
1022
1023 def adddatafilter(self, name, filter):
1023 def adddatafilter(self, name, filter):
1024 self._datafilters[name] = filter
1024 self._datafilters[name] = filter
1025
1025
1026 def wread(self, filename):
1026 def wread(self, filename):
1027 if self._link(filename):
1027 if self._link(filename):
1028 data = self.wvfs.readlink(filename)
1028 data = self.wvfs.readlink(filename)
1029 else:
1029 else:
1030 data = self.wvfs.read(filename)
1030 data = self.wvfs.read(filename)
1031 return self._filter(self._encodefilterpats, filename, data)
1031 return self._filter(self._encodefilterpats, filename, data)
1032
1032
1033 def wwrite(self, filename, data, flags, backgroundclose=False):
1033 def wwrite(self, filename, data, flags, backgroundclose=False):
1034 """write ``data`` into ``filename`` in the working directory
1034 """write ``data`` into ``filename`` in the working directory
1035
1035
1036 This returns length of written (maybe decoded) data.
1036 This returns length of written (maybe decoded) data.
1037 """
1037 """
1038 data = self._filter(self._decodefilterpats, filename, data)
1038 data = self._filter(self._decodefilterpats, filename, data)
1039 if 'l' in flags:
1039 if 'l' in flags:
1040 self.wvfs.symlink(data, filename)
1040 self.wvfs.symlink(data, filename)
1041 else:
1041 else:
1042 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1042 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1043 if 'x' in flags:
1043 if 'x' in flags:
1044 self.wvfs.setflags(filename, False, True)
1044 self.wvfs.setflags(filename, False, True)
1045 return len(data)
1045 return len(data)
1046
1046
1047 def wwritedata(self, filename, data):
1047 def wwritedata(self, filename, data):
1048 return self._filter(self._decodefilterpats, filename, data)
1048 return self._filter(self._decodefilterpats, filename, data)
1049
1049
1050 def currenttransaction(self):
1050 def currenttransaction(self):
1051 """return the current transaction or None if non exists"""
1051 """return the current transaction or None if non exists"""
1052 if self._transref:
1052 if self._transref:
1053 tr = self._transref()
1053 tr = self._transref()
1054 else:
1054 else:
1055 tr = None
1055 tr = None
1056
1056
1057 if tr and tr.running():
1057 if tr and tr.running():
1058 return tr
1058 return tr
1059 return None
1059 return None
1060
1060
    def transaction(self, desc, report=None):
        """open a new transaction on the repository and return it

        ``desc`` is a human readable name used for hooks and the undo
        message.  ``report`` optionally overrides ``ui.warn`` as the
        rollback output reporter.  If a transaction is already running,
        a nested handle for it is returned instead of a new one.  The
        caller must hold the store lock.
        """
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # unique identifier for this transaction, exposed to hooks
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        # snapshot state so the transaction can be rolled back, and plan
        # the journal.* -> undo.* renames performed after a clean close
        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1147
1147
1148 def _journalfiles(self):
1148 def _journalfiles(self):
1149 return ((self.svfs, 'journal'),
1149 return ((self.svfs, 'journal'),
1150 (self.vfs, 'journal.dirstate'),
1150 (self.vfs, 'journal.dirstate'),
1151 (self.vfs, 'journal.branch'),
1151 (self.vfs, 'journal.branch'),
1152 (self.vfs, 'journal.desc'),
1152 (self.vfs, 'journal.desc'),
1153 (self.vfs, 'journal.bookmarks'),
1153 (self.vfs, 'journal.bookmarks'),
1154 (self.svfs, 'journal.phaseroots'))
1154 (self.svfs, 'journal.phaseroots'))
1155
1155
1156 def undofiles(self):
1156 def undofiles(self):
1157 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1157 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1158
1158
    def _writejournal(self, desc):
        """snapshot the repo state needed to roll back a transaction

        Backs up the dirstate, current branch, bookmarks and phase roots
        under ``journal.*`` names, plus ``journal.desc`` holding the
        pre-transaction changelog length and ``desc``.
        """
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        # tryread: bookmarks/phaseroots may not exist yet in a new repo
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1169
1169
1170 def recover(self):
1170 def recover(self):
1171 with self.lock():
1171 with self.lock():
1172 if self.svfs.exists("journal"):
1172 if self.svfs.exists("journal"):
1173 self.ui.status(_("rolling back interrupted transaction\n"))
1173 self.ui.status(_("rolling back interrupted transaction\n"))
1174 vfsmap = {'': self.svfs,
1174 vfsmap = {'': self.svfs,
1175 'plain': self.vfs,}
1175 'plain': self.vfs,}
1176 transaction.rollback(self.svfs, vfsmap, "journal",
1176 transaction.rollback(self.svfs, vfsmap, "journal",
1177 self.ui.warn)
1177 self.ui.warn)
1178 self.invalidate()
1178 self.invalidate()
1179 return True
1179 return True
1180 else:
1180 else:
1181 self.ui.warn(_("no interrupted transaction available\n"))
1181 self.ui.warn(_("no interrupted transaction available\n"))
1182 return False
1182 return False
1183
1183
1184 def rollback(self, dryrun=False, force=False):
1184 def rollback(self, dryrun=False, force=False):
1185 wlock = lock = dsguard = None
1185 wlock = lock = dsguard = None
1186 try:
1186 try:
1187 wlock = self.wlock()
1187 wlock = self.wlock()
1188 lock = self.lock()
1188 lock = self.lock()
1189 if self.svfs.exists("undo"):
1189 if self.svfs.exists("undo"):
1190 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1190 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1191
1191
1192 return self._rollback(dryrun, force, dsguard)
1192 return self._rollback(dryrun, force, dsguard)
1193 else:
1193 else:
1194 self.ui.warn(_("no rollback information available\n"))
1194 self.ui.warn(_("no rollback information available\n"))
1195 return 1
1195 return 1
1196 finally:
1196 finally:
1197 release(dsguard, lock, wlock)
1197 release(dsguard, lock, wlock)
1198
1198
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """implementation of rollback(): restore the last 'undo' state

        Returns 0 on success (including dry-run).  Raises Abort when
        rolling back the last commit while not checked out on it could
        lose data and ``force`` is not set.
        """
        ui = self.ui
        try:
            # undo.desc records "<old changelog length>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # no undo.desc: we cannot tell what is being undone
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # did the rollback strip the working directory's parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1269
1269
1270 def invalidatecaches(self):
1270 def invalidatecaches(self):
1271
1271
1272 if '_tagscache' in vars(self):
1272 if '_tagscache' in vars(self):
1273 # can't use delattr on proxy
1273 # can't use delattr on proxy
1274 del self.__dict__['_tagscache']
1274 del self.__dict__['_tagscache']
1275
1275
1276 self.unfiltered()._branchcaches.clear()
1276 self.unfiltered()._branchcaches.clear()
1277 self.invalidatevolatilesets()
1277 self.invalidatevolatilesets()
1278
1278
1279 def invalidatevolatilesets(self):
1279 def invalidatevolatilesets(self):
1280 self.filteredrevcache.clear()
1280 self.filteredrevcache.clear()
1281 obsolete.clearobscaches(self)
1281 obsolete.clearobscaches(self)
1282
1282
1283 def invalidatedirstate(self):
1283 def invalidatedirstate(self):
1284 '''Invalidates the dirstate, causing the next call to dirstate
1284 '''Invalidates the dirstate, causing the next call to dirstate
1285 to check if it was modified since the last time it was read,
1285 to check if it was modified since the last time it was read,
1286 rereading it if it has.
1286 rereading it if it has.
1287
1287
1288 This is different to dirstate.invalidate() that it doesn't always
1288 This is different to dirstate.invalidate() that it doesn't always
1289 rereads the dirstate. Use dirstate.invalidate() if you want to
1289 rereads the dirstate. Use dirstate.invalidate() if you want to
1290 explicitly read the dirstate again (i.e. restoring it to a previous
1290 explicitly read the dirstate again (i.e. restoring it to a previous
1291 known good state).'''
1291 known good state).'''
1292 if hasunfilteredcache(self, 'dirstate'):
1292 if hasunfilteredcache(self, 'dirstate'):
1293 for k in self.dirstate._filecache:
1293 for k in self.dirstate._filecache:
1294 try:
1294 try:
1295 delattr(self.dirstate, k)
1295 delattr(self.dirstate, k)
1296 except AttributeError:
1296 except AttributeError:
1297 pass
1297 pass
1298 delattr(self.unfiltered(), 'dirstate')
1298 delattr(self.unfiltered(), 'dirstate')
1299
1299
1300 def invalidate(self, clearfilecache=False):
1300 def invalidate(self, clearfilecache=False):
1301 '''Invalidates both store and non-store parts other than dirstate
1301 '''Invalidates both store and non-store parts other than dirstate
1302
1302
1303 If a transaction is running, invalidation of store is omitted,
1303 If a transaction is running, invalidation of store is omitted,
1304 because discarding in-memory changes might cause inconsistency
1304 because discarding in-memory changes might cause inconsistency
1305 (e.g. incomplete fncache causes unintentional failure, but
1305 (e.g. incomplete fncache causes unintentional failure, but
1306 redundant one doesn't).
1306 redundant one doesn't).
1307 '''
1307 '''
1308 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1308 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1309 for k in self._filecache.keys():
1309 for k in self._filecache.keys():
1310 # dirstate is invalidated separately in invalidatedirstate()
1310 # dirstate is invalidated separately in invalidatedirstate()
1311 if k == 'dirstate':
1311 if k == 'dirstate':
1312 continue
1312 continue
1313
1313
1314 if clearfilecache:
1314 if clearfilecache:
1315 del self._filecache[k]
1315 del self._filecache[k]
1316 try:
1316 try:
1317 delattr(unfiltered, k)
1317 delattr(unfiltered, k)
1318 except AttributeError:
1318 except AttributeError:
1319 pass
1319 pass
1320 self.invalidatecaches()
1320 self.invalidatecaches()
1321 if not self.currenttransaction():
1321 if not self.currenttransaction():
1322 # TODO: Changing contents of store outside transaction
1322 # TODO: Changing contents of store outside transaction
1323 # causes inconsistency. We should make in-memory store
1323 # causes inconsistency. We should make in-memory store
1324 # changes detectable, and abort if changed.
1324 # changes detectable, and abort if changed.
1325 self.store.invalidatecaches()
1325 self.store.invalidatecaches()
1326
1326
1327 def invalidateall(self):
1327 def invalidateall(self):
1328 '''Fully invalidates both store and non-store parts, causing the
1328 '''Fully invalidates both store and non-store parts, causing the
1329 subsequent operation to reread any outside changes.'''
1329 subsequent operation to reread any outside changes.'''
1330 # extension should hook this to invalidate its caches
1330 # extension should hook this to invalidate its caches
1331 self.invalidate()
1331 self.invalidate()
1332 self.invalidatedirstate()
1332 self.invalidatedirstate()
1333
1333
1334 @unfilteredmethod
1334 @unfilteredmethod
1335 def _refreshfilecachestats(self, tr):
1335 def _refreshfilecachestats(self, tr):
1336 """Reload stats of cached files so that they are flagged as valid"""
1336 """Reload stats of cached files so that they are flagged as valid"""
1337 for k, ce in self._filecache.items():
1337 for k, ce in self._filecache.items():
1338 if k == 'dirstate' or k not in self.__dict__:
1338 if k == 'dirstate' or k not in self.__dict__:
1339 continue
1339 continue
1340 ce.refresh()
1340 ce.refresh()
1341
1341
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """acquire ``lockname`` on ``vfs`` and return the lock object

        When the lock is already held, raise LockHeld immediately if
        ``wait`` is False; otherwise warn about the current holder and
        retry with the configured ``ui.timeout`` (600s by default).
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            # first attempt is non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1373
1373
1374 def _afterlock(self, callback):
1374 def _afterlock(self, callback):
1375 """add a callback to be run when the repository is fully unlocked
1375 """add a callback to be run when the repository is fully unlocked
1376
1376
1377 The callback will be executed when the outermost lock is released
1377 The callback will be executed when the outermost lock is released
1378 (with wlock being higher level than 'lock')."""
1378 (with wlock being higher level than 'lock')."""
1379 for ref in (self._wlockref, self._lockref):
1379 for ref in (self._wlockref, self._lockref):
1380 l = ref and ref()
1380 l = ref and ref()
1381 if l and l.held:
1381 if l and l.held:
1382 l.postrelease.append(callback)
1382 l.postrelease.append(callback)
1383 break
1383 break
1384 else: # no lock have been found.
1384 else: # no lock have been found.
1385 callback()
1385 callback()
1386
1386
1387 def lock(self, wait=True):
1387 def lock(self, wait=True):
1388 '''Lock the repository store (.hg/store) and return a weak reference
1388 '''Lock the repository store (.hg/store) and return a weak reference
1389 to the lock. Use this before modifying the store (e.g. committing or
1389 to the lock. Use this before modifying the store (e.g. committing or
1390 stripping). If you are opening a transaction, get a lock as well.)
1390 stripping). If you are opening a transaction, get a lock as well.)
1391
1391
1392 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1392 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1393 'wlock' first to avoid a dead-lock hazard.'''
1393 'wlock' first to avoid a dead-lock hazard.'''
1394 l = self._currentlock(self._lockref)
1394 l = self._currentlock(self._lockref)
1395 if l is not None:
1395 if l is not None:
1396 l.lock()
1396 l.lock()
1397 return l
1397 return l
1398
1398
1399 l = self._lock(self.svfs, "lock", wait, None,
1399 l = self._lock(self.svfs, "lock", wait, None,
1400 self.invalidate, _('repository %s') % self.origroot)
1400 self.invalidate, _('repository %s') % self.origroot)
1401 self._lockref = weakref.ref(l)
1401 self._lockref = weakref.ref(l)
1402 return l
1402 return l
1403
1403
1404 def _wlockchecktransaction(self):
1404 def _wlockchecktransaction(self):
1405 if self.currenttransaction() is not None:
1405 if self.currenttransaction() is not None:
1406 raise error.LockInheritanceContractViolation(
1406 raise error.LockInheritanceContractViolation(
1407 'wlock cannot be inherited in the middle of a transaction')
1407 'wlock cannot be inherited in the middle of a transaction')
1408
1408
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquisition: increase the lock's depth counter
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on final release: persist the dirstate, unless a parent
            # change is pending, in which case throw the changes away
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1444
1444
1445 def _currentlock(self, lockref):
1445 def _currentlock(self, lockref):
1446 """Returns the lock if it's held, or None if it's not."""
1446 """Returns the lock if it's held, or None if it's not."""
1447 if lockref is None:
1447 if lockref is None:
1448 return None
1448 return None
1449 l = lockref()
1449 l = lockref()
1450 if l is None or not l.held:
1450 if l is None or not l.held:
1451 return None
1451 return None
1452 return l
1452 return l
1453
1453
1454 def currentwlock(self):
1454 def currentwlock(self):
1455 """Returns the wlock if it's held, or None if it's not."""
1455 """Returns the wlock if it's held, or None if it's not."""
1456 return self._currentlock(self._wlockref)
1456 return self._currentlock(self._wlockref)
1457
1457
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx       - file context carrying the content to commit
    manifest1  - manifest of the first parent
    manifest2  - manifest of the second parent (falsy unless merging)
    linkrev    - changelog revision the new filelog entry links to
    tr         - the active transaction
    changelist - list of changed file names; appended to as a side effect

    Returns the filelog node to record in the manifest (possibly a
    reused parent node when the content is unchanged).
    """

    fname = fctx.path()
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = manifest2.get(fname, nullid)
    if isinstance(fctx, context.filectx):
        # fctx already points at a stored filelog node; if it matches a
        # parent we can reuse it instead of writing a new revision.
        node = fctx.filenode()
        if node in [fparent1, fparent2]:
            self.ui.debug('reusing %s filelog entry\n' % fname)
            if manifest1.flags(fname) != fctx.flags():
                changelist.append(fname)
            return node

    flog = self.file(fname)
    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # Here, we used to search backwards through history to try to find
        # where the file copy came from if the source of a copy was not in
        # the parent directory. However, this doesn't actually make sense to
        # do (what does a copy from something not in your working copy even
        # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
        # the user that copy information was dropped, so if they didn't
        # expect this outcome it can be fixed, but this is the correct
        # behavior in this circumstance.

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent1 == nullid:
        fparent1, fparent2 = fparent2, nullid
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
        if fparent1 in fparentancestors:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 in fparentancestors:
            fparent2 = nullid

    # is the file changed?
    text = fctx.data()
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
    # are just the flags changed during merge?
    elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
1545
1545
def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
    """check for commit arguments that aren't committable"""
    # Only exact file patterns or explicit path prefixes can name
    # something uncommittable; other matchers never need this check.
    if not (match.isexact() or match.prefix()):
        return

    committable = set(status.modified + status.added + status.removed)

    for name in match.files():
        name = self.dirstate.normalize(name)
        # '.', files already part of the commit, and subrepo roots
        # are always acceptable.
        if name == '.' or name in committable or name in wctx.substate:
            continue
        if name in status.deleted:
            fail(name, _('file not found!'))
        if name in vdirs: # visited directory
            prefix = name + '/'
            # the directory is fine as long as something under it commits
            if not any(entry.startswith(prefix) for entry in committable):
                fail(name, _("no match under directory!"))
        elif name not in self.dirstate:
            fail(name, _("file not tracked!"))
1566
1566
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra=None):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the node of the new changeset, or None when there is
    nothing to commit and empty commits are not allowed.
    """
    if extra is None:
        extra = {}

    # abort the whole commit on any bad explicit file argument
    def fail(f, msg):
        raise error.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        vdirs = []
        match.explicitdir = vdirs.append
        match.bad = fail

    # wlock before lock: working-copy lock is always taken first
    wlock = lock = tr = None
    try:
        wlock = self.wlock()
        lock = self.lock() # for recent changelog (see issue4368)

        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if not force and merge and match.ispartial():
            raise error.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        status = self.status(match=match, clean=force)
        if force:
            status.modified.extend(status.clean) # mq may commit clean files

        # check subrepos
        subs = []
        commitsubs = set()
        newstate = wctx.substate.copy()
        # only manage subrepos and .hgsubstate if .hgsub is present
        if '.hgsub' in wctx:
            # we'll decide whether to track this ourselves, thanks
            for c in status.modified, status.added, status.removed:
                if '.hgsubstate' in c:
                    c.remove('.hgsubstate')

            # compare current state to last committed state
            # build new substate based on last committed state
            oldstate = wctx.p1().substate
            for s in sorted(newstate.keys()):
                if not match(s):
                    # ignore working copy, use old state if present
                    if s in oldstate:
                        newstate[s] = oldstate[s]
                        continue
                    if not force:
                        raise error.Abort(
                            _("commit with new subrepo %s excluded") % s)
                dirtyreason = wctx.sub(s).dirtyreason(True)
                if dirtyreason:
                    if not self.ui.configbool('ui', 'commitsubrepos'):
                        raise error.Abort(dirtyreason,
                            hint=_("use --subrepos for recursive commit"))
                    subs.append(s)
                    commitsubs.add(s)
                else:
                    bs = wctx.sub(s).basestate()
                    newstate[s] = (newstate[s][0], bs, newstate[s][2])
                    if oldstate.get(s, (None, None, None))[1] != bs:
                        subs.append(s)

            # check for removed subrepos
            for p in wctx.parents():
                r = [s for s in p.substate if s not in newstate]
                subs += [s for s in r if match(s)]
            if subs:
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise error.Abort(
                        _("can't commit subrepos without .hgsub"))
                status.modified.insert(0, '.hgsubstate')

        elif '.hgsub' in status.removed:
            # clean up .hgsubstate when .hgsub is removed
            if ('.hgsubstate' in wctx and
                '.hgsubstate' not in (status.modified + status.added +
                                      status.removed)):
                status.removed.insert(0, '.hgsubstate')

        # make sure all explicit patterns are matched
        if not force:
            self.checkcommitpatterns(wctx, vdirs, match, status, fail)

        cctx = context.workingcommitctx(self, status,
                                        text, user, date, extra)

        # internal config: ui.allowemptycommit
        allowemptycommit = (wctx.branch() != wctx.p1().branch()
                            or extra.get('close') or merge or cctx.files()
                            or self.ui.configbool('ui', 'allowemptycommit'))
        if not allowemptycommit:
            return None

        if merge and cctx.deleted():
            raise error.Abort(_("cannot commit merge with missing files"))

        ms = mergemod.mergestate.read(self)
        mergeutil.checkunresolved(ms)

        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook).  Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        # commit subs and write new state
        if subs:
            for s in sorted(commitsubs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepo.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                newstate[s] = (newstate[s][0], sr)
            subrepo.writestate(self, newstate)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1,
                      parent2=hookp2)
            tr = self.transaction('commit')
            ret = self.commitctx(cctx, True)
        except: # re-raises
            # point the user at the saved message before propagating
            if edited:
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise
        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, [p1, p2], ret)
        cctx.markcommitted(ret)
        ms.reset()
        tr.close()

    finally:
        lockmod.release(tr, lock, wlock)

    def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
        # hack for command that use a temporary commit (eg: histedit)
        # temporary commit got stripped before hook release
        if self.changelog.hasnode(ret):
            self.hook("commit", node=node, parent1=parent1,
                      parent2=parent2)
    # run the "commit" hook only after all locks are released
    self._afterlock(commithook)
    return ret
1728
1728
@unfilteredmethod
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    ctx   - the (usually in-memory) changectx describing the revision
    error - when true, IOErrors while committing files are fatal even
            if the file is simply missing (ENOENT)

    Returns the node of the new changelog entry.
    """

    tr = None
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        # a weak proxy avoids keeping the transaction alive via revlogs
        trp = weakref.proxy(tr)

        if ctx.manifestnode():
            # reuse an existing manifest revision
            mn = ctx.manifestnode()
            files = ctx.files()
        elif ctx.files():
            m1ctx = p1.manifestctx()
            m2ctx = p2.manifestctx()
            mctx = m1ctx.copy()

            m = mctx.read()
            m1 = m1ctx.read()
            m2 = m2ctx.read()

            # check in files
            added = []
            changed = []
            removed = list(ctx.removed())
            linkrev = len(self)
            self.ui.note(_("committing files:\n"))
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    if fctx is None:
                        removed.append(f)
                    else:
                        added.append(f)
                        m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                trp, changed)
                        m.setflag(f, fctx.flags())
                except OSError as inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError as inst:
                    # a missing file is only fatal in "error" mode
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

            # update manifest
            self.ui.note(_("committing manifest\n"))
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m]
            for f in drop:
                del m[f]
            mn = mctx.write(trp, linkrev,
                            p1.manifestnode(), p2.manifestnode(),
                            added, drop)
            files = changed + removed
        else:
            # no file changed: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.ui.note(_("committing changelog\n"))
        self.changelog.delayupdate(tr)
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        # set the new commit is proper phase
        targetphase = subrepo.newcommitphase(self.ui, ctx)
        if targetphase:
            # retract boundary do not alter parent changeset.
            # if a parent have higher the resulting phase will
            # be compliant anyway
            #
            # if minimal phase was 0 we don't need to retract anything
            phases.retractboundary(self, tr, targetphase, [n])
        tr.close()
        branchmap.updatecache(self.filtered('served'))
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
1822
1822
@unfilteredmethod
def destroying(self):
    '''Inform the repository that nodes are about to be destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done before destroying history.

    This is mostly useful for saving state that is in memory and waiting
    to be flushed when the current lock is released. Because a call to
    destroyed is imminent, the repo will be invalidated causing those
    changes to stay in memory (waiting for the next unlock), or vanish
    completely.
    '''
    # When using the same lock to commit and strip, the phasecache is left
    # dirty after committing. Then when we strip, the repo is invalidated,
    # causing those changes to disappear.
    # vars() checks the instance dict, so nothing is written unless a
    # phasecache was actually instantiated on this repo.
    if '_phasecache' in vars(self):
        self._phasecache.write()
1840
1840
@unfilteredmethod
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.
    '''
    # When one tries to:
    # 1) destroy nodes thus calling this method (e.g. strip)
    # 2) use phasecache somewhere (e.g. commit)
    #
    # then 2) will fail because the phasecache contains nodes that were
    # removed. We can either remove phasecache from the filecache,
    # causing it to reload next time it is accessed, or simply filter
    # the removed nodes now and write the updated cache.
    self._phasecache.filterunknown(self)
    self._phasecache.write()

    # update the 'served' branch cache to help read only server process
    # Thanks to branchcache collaboration this is done from the nearest
    # filtered subset and it is expected to be fast.
    branchmap.updatecache(self.filtered('served'))

    # Ensure the persistent tag cache is updated. Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback. That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    self.invalidate()
1874
1874
def walk(self, match, node=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function
    '''
    # resolve the context first (node=None means the working directory)
    ctx = self[node]
    return ctx.walk(match)
1882
1882
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    '''a convenience method that calls node1.status(node2)'''
    # delegate to the base context's status implementation
    base = self[node1]
    return base.status(node2, match, ignored, clean, unknown,
                       listsubrepos)
1889
1889
def heads(self, start=None):
    cl = self.changelog
    if start is None:
        # every head in the repository, newest revision first
        return [cl.node(rev) for rev in reversed(cl.headrevs())]

    # only heads reachable from start, sorted by descending revision
    found = cl.heads(start)
    return sorted(found, key=cl.rev, reverse=True)
1899
1899
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branchcache = self.branchmap()
    if branch not in branchcache:
        return []
    # the cache yields heads lowest to highest; callers expect the
    # opposite order
    result = list(reversed(branchcache.branchheads(branch, closed=closed)))
    if start is not None:
        # drop the heads that cannot be reached from startrev
        reachable = set(self.changelog.nodesbetween([start], result)[2])
        result = [h for h in result if h in reachable]
    return result
1920
1920
def branches(self, nodes):
    # with no starting points, describe the branch of the tip
    if not nodes:
        nodes = [self.changelog.tip()]
    result = []
    for tipnode in nodes:
        node = tipnode
        while True:
            parents = self.changelog.parents(node)
            # stop at a merge or at a root: that node closes the
            # linear run that started at tipnode
            if parents[1] != nullid or parents[0] == nullid:
                result.append((tipnode, node, parents[0], parents[1]))
                break
            node = parents[0]
    return result
1934
1934
def between(self, pairs):
    result = []

    for top, bottom in pairs:
        node, sample, seen = top, [], 0
        nextgap = 1

        # walk first parents from top towards bottom, sampling nodes
        # at exponentially growing distances (1, 2, 4, ...)
        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if seen == nextgap:
                sample.append(node)
                nextgap *= 2
            node = parent
            seen += 1

        result.append(sample)

    return result
1953
1953
def checkpush(self, pushop):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.
    """
    # intentionally a no-op: this is an extension hook point
1960
1960
1961 @unfilteredpropertycache
1961 @unfilteredpropertycache
1962 def prepushoutgoinghooks(self):
1962 def prepushoutgoinghooks(self):
1963 """Return util.hooks consists of a pushop with repo, remote, outgoing
1963 """Return util.hooks consists of a pushop with repo, remote, outgoing
1964 methods, which are called before pushing changesets.
1964 methods, which are called before pushing changesets.
1965 """
1965 """
1966 return util.hooks()
1966 return util.hooks()
1967
1967
1968 def pushkey(self, namespace, key, old, new):
1968 def pushkey(self, namespace, key, old, new):
1969 try:
1969 try:
1970 tr = self.currenttransaction()
1970 tr = self.currenttransaction()
1971 hookargs = {}
1971 hookargs = {}
1972 if tr is not None:
1972 if tr is not None:
1973 hookargs.update(tr.hookargs)
1973 hookargs.update(tr.hookargs)
1974 hookargs['namespace'] = namespace
1974 hookargs['namespace'] = namespace
1975 hookargs['key'] = key
1975 hookargs['key'] = key
1976 hookargs['old'] = old
1976 hookargs['old'] = old
1977 hookargs['new'] = new
1977 hookargs['new'] = new
1978 self.hook('prepushkey', throw=True, **hookargs)
1978 self.hook('prepushkey', throw=True, **hookargs)
1979 except error.HookAbort as exc:
1979 except error.HookAbort as exc:
1980 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1980 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1981 if exc.hint:
1981 if exc.hint:
1982 self.ui.write_err(_("(%s)\n") % exc.hint)
1982 self.ui.write_err(_("(%s)\n") % exc.hint)
1983 return False
1983 return False
1984 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1984 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1985 ret = pushkey.push(self, namespace, key, old, new)
1985 ret = pushkey.push(self, namespace, key, old, new)
1986 def runhook():
1986 def runhook():
1987 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1987 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1988 ret=ret)
1988 ret=ret)
1989 self._afterlock(runhook)
1989 self._afterlock(runhook)
1990 return ret
1990 return ret
1991
1991
1992 def listkeys(self, namespace):
1992 def listkeys(self, namespace):
1993 self.hook('prelistkeys', throw=True, namespace=namespace)
1993 self.hook('prelistkeys', throw=True, namespace=namespace)
1994 self.ui.debug('listing keys for "%s"\n' % namespace)
1994 self.ui.debug('listing keys for "%s"\n' % namespace)
1995 values = pushkey.list(self, namespace)
1995 values = pushkey.list(self, namespace)
1996 self.hook('listkeys', namespace=namespace, values=values)
1996 self.hook('listkeys', namespace=namespace, values=values)
1997 return values
1997 return values
1998
1998
1999 def debugwireargs(self, one, two, three=None, four=None, five=None):
1999 def debugwireargs(self, one, two, three=None, four=None, five=None):
2000 '''used to test argument passing over the wire'''
2000 '''used to test argument passing over the wire'''
2001 return "%s %s %s %s %s" % (one, two, three, four, five)
2001 return "%s %s %s %s %s" % (one, two, three, four, five)
2002
2002
2003 def savecommitmessage(self, text):
2003 def savecommitmessage(self, text):
2004 fp = self.vfs('last-message.txt', 'wb')
2004 fp = self.vfs('last-message.txt', 'wb')
2005 try:
2005 try:
2006 fp.write(text)
2006 fp.write(text)
2007 finally:
2007 finally:
2008 fp.close()
2008 fp.close()
2009 return self.pathto(fp.name[len(self.root) + 1:])
2009 return self.pathto(fp.name[len(self.root) + 1:])
2010
2010
# used to avoid circular references so destructors work
def aftertrans(files):
    """Build a callback that renames (vfs, src, dest) triples post-transaction.

    The returned closure removes *dest* if present and then renames *src*
    over it, for every triple. A missing *src* (journal file never written)
    is silently ignored.
    """
    renames = [tuple(spec) for spec in files]
    def a():
        for vfs, src, dest in renames:
            # delete dest first: if src and dest refer to the same file,
            # vfs.rename would be a no-op and leave both names on disk
            try:
                vfs.unlink(dest)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return a
2029
2029
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    The basename must start with 'journal'; only that leading occurrence
    is replaced by 'undo' (e.g. 'journal.dirstate' -> 'undo.dirstate').
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2034
2034
def instance(ui, path, create):
    """Open (or create) the local repository at *path*.

    Entry point used by the repository-scheme dispatch; *path* may be a
    file:// URL and is reduced to a local filesystem path first.
    """
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2037
2037
def islocal(path):
    """Return True: repositories handled by this module are always local."""
    return True
2040
2040
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])

    # boolean format knobs that map one-to-one onto a requirement name;
    # all default to enabled
    formatknobs = [('usestore', 'store'),
                   ('usefncache', 'fncache'),
                   ('dotencode', 'dotencode')]
    for option, requirement in formatknobs:
        if ui.configbool('format', option, True):
            requirements.add(requirement)

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
General Comments 0
You need to be logged in to leave comments. Login now