##// END OF EJS Templates
localrepo: rename proxycls to filteredrepo...
Jun Wu -
r31279:052bc876 default
parent child Browse files
Show More
@@ -1,2073 +1,2073 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 store,
56 store,
57 subrepo,
57 subrepo,
58 tags as tagsmod,
58 tags as tagsmod,
59 transaction,
59 transaction,
60 txnutil,
60 txnutil,
61 util,
61 util,
62 vfs as vfsmod,
62 vfs as vfsmod,
63 )
63 )
64
64
# convenience alias: release a sequence of locks (see lock.release)
release = lockmod.release
# urllib error/request compatibility aliases re-exported from util
# (presumably py2/py3 urllib shims -- confirm in util)
urlerr = util.urlerr
urlreq = util.urlreq
68
68
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered

    Descriptor that redirects every get/set/delete to the *unfiltered*
    repository, so that all repoview filters of one repo share a single
    cached value per tracked file.
    """

    def __get__(self, repo, type=None):
        # class-level access (repo is None) returns the descriptor itself,
        # matching standard descriptor protocol behavior
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
81
81
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store, so join via the repo's sjoin
        # instead of the plain .hg/ path used by repofilecache
        return obj.sjoin(fname)
86
86
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only

    When accessed through a filtered repoview, the value is looked up on
    the unfiltered repository instead, so the cache lives in one place.
    """

    def __get__(self, repo, type=None):
        unfiltered = repo.unfiltered()
        if unfiltered is not repo:
            # filtered view: defer to the attribute on the unfiltered repo
            # (which triggers this same descriptor there and caches it)
            return getattr(unfiltered, self.name)
        return super(unfilteredpropertycache, self).__get__(unfiltered)
95
95
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account

    Each repoview instance gets its own cached value (stored directly in
    the instance dict), unlike unfilteredpropertycache.
    """

    def cachevalue(self, obj, value):
        # bypass any custom __setattr__ so the cached value is written
        # straight into the instance __dict__
        object.__setattr__(obj, self.name, value)
101
101
102
102
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # a computed unfilteredpropertycache value lives in the unfiltered
    # repo's instance dict; probe that dict directly
    unfi = repo.unfiltered()
    return name in vars(unfi)
106
106
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered twin before
        # delegating to the wrapped method
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
112
112
# capabilities advertised by a modern local peer
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# legacy peers additionally support the old changegroupsubset command
legacycaps = moderncaps | {'changegroupsubset'}
116
116
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose only the 'served' view of the repo to peer users
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        # let the repo trim/extend the advertised capability set
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand back the underlying repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate a push race into a wire-protocol-style error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
223
223
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (adds 'changegroupsubset')
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
242
242
243 class localrepository(object):
243 class localrepository(object):
244
244
    # requirements that describe the on-disk revlog/manifest format
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # full set of requirements this class can open (format + layout flags)
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    # requirements forwarded to the store vfs as opener options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # None on the unfiltered repo; repoview subclasses override this
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
255
255
    def __init__(self, baseui, path, create=False):
        """Open (or, if *create* is true, create) the repository at *path*.

        Raises error.RepoError if the repository is missing (create=False)
        or already exists (create=True).
        """
        self.requirements = set()
        # vfs to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs to access the content of the repository
        self.vfs = None
        # vfs to access the store part of the repository
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        # auditor variant that does not touch the filesystem
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # per-repo config; missing .hg/hgrc is fine
            self.ui.readconfig(self.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run hooks registered by currently-enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # a missing requires file means an old-style repo; anything
                # else is a real error
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            # shared repos redirect the store to another repository's path
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                # relative share: resolve against this repo's .hg directory
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
380
380
    @property
    def wopener(self):
        # deprecated alias for the working-directory vfs
        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
        return self.wvfs
385
385
    @property
    def opener(self):
        # deprecated alias for the .hg/ vfs
        self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
        return self.vfs
390
390
    def close(self):
        # flush persistent caches before the repo goes away
        self._writecaches()
393
393
    def _loadextensions(self):
        """Load all extensions enabled in this repo's ui."""
        extensions.loadall(self.ui)
396
396
397 def _writecaches(self):
397 def _writecaches(self):
398 if self._revbranchcache:
398 if self._revbranchcache:
399 self._revbranchcache.write()
399 self._revbranchcache.write()
400
400
401 def _restrictcapabilities(self, caps):
401 def _restrictcapabilities(self, caps):
402 if self.ui.configbool('experimental', 'bundle2-advertise', True):
402 if self.ui.configbool('experimental', 'bundle2-advertise', True):
403 caps = set(caps)
403 caps = set(caps)
404 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
404 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
405 caps.add('bundle2=' + urlreq.quote(capsblob))
405 caps.add('bundle2=' + urlreq.quote(capsblob))
406 return caps
406 return caps
407
407
    def _applyopenerreqs(self):
        """Translate repo requirements and config into store vfs options."""
        # every requirement listed in openerreqs becomes a truthy option
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        # pick the revlog compression engine from an exp-compression-*
        # requirement, if one is set
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]
432
432
    def _writerequirements(self):
        """Persist self.requirements to .hg/requires."""
        scmutil.writerequires(self.vfs, self.requirements)
435
435
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        Returns True only when *path* lies under this repo's root and is
        registered as a subrepository (possibly recursively) in the
        working-directory context.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path itself is a registered subrepo
                    return True
                else:
                    # path is inside a subrepo; delegate the remainder of
                    # the check to that subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter prefix
                parts.pop()
        return False
473
473
    def peer(self):
        """Return a fresh localpeer wrapping this repository."""
        return localpeer(self) # not cached to avoid reference cycle
476
476
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        # the base class is already unfiltered, so return itself
        return self
482
482
    def filtered(self, name):
        """Return a filtered version of a repository

        *name* selects the repoview filter (e.g. 'served', 'visible').
        """
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
490
490
    # cached bookmark store; invalidated when either bookmarks file changes
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)
494
494
    @property
    def _activebookmark(self):
        # the currently active bookmark (as tracked by the bookmark store)
        return self._bookmarks.active
498
498
499 def bookmarkheads(self, bookmark):
499 def bookmarkheads(self, bookmark):
500 name = bookmark.split('@', 1)[0]
500 name = bookmark.split('@', 1)[0]
501 heads = []
501 heads = []
502 for mark, n in self._bookmarks.iteritems():
502 for mark, n in self._bookmarks.iteritems():
503 if mark.split('@', 1)[0] == name:
503 if mark.split('@', 1)[0] == name:
504 heads.append(n)
504 heads.append(n)
505 return heads
505 return heads
506
506
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # cached phase information, seeded with extension-provided defaults
        return phases.phasecache(self, self._phasedefaults)
513
513
514 @storecache('obsstore')
514 @storecache('obsstore')
515 def obsstore(self):
515 def obsstore(self):
516 # read default format for new obsstore.
516 # read default format for new obsstore.
517 # developer config: format.obsstore-version
517 # developer config: format.obsstore-version
518 defaultformat = self.ui.configint('format', 'obsstore-version', None)
518 defaultformat = self.ui.configint('format', 'obsstore-version', None)
519 # rely on obsstore class default when possible.
519 # rely on obsstore class default when possible.
520 kwargs = {}
520 kwargs = {}
521 if defaultformat is not None:
521 if defaultformat is not None:
522 kwargs['defaultformat'] = defaultformat
522 kwargs['defaultformat'] = defaultformat
523 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
523 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
524 store = obsolete.obsstore(self.svfs, readonly=readonly,
524 store = obsolete.obsstore(self.svfs, readonly=readonly,
525 **kwargs)
525 **kwargs)
526 if store and readonly:
526 if store and readonly:
527 self.ui.warn(
527 self.ui.warn(
528 _('obsolete feature not enabled but %i markers found!\n')
528 _('obsolete feature not enabled but %i markers found!\n')
529 % len(list(store)))
529 % len(list(store)))
530 return store
530 return store
531
531
532 @storecache('00changelog.i')
532 @storecache('00changelog.i')
533 def changelog(self):
533 def changelog(self):
534 c = changelog.changelog(self.svfs)
534 c = changelog.changelog(self.svfs)
535 if txnutil.mayhavepending(self.root):
535 if txnutil.mayhavepending(self.root):
536 c.readpending('00changelog.i.a')
536 c.readpending('00changelog.i.a')
537 return c
537 return c
538
538
539 def _constructmanifest(self):
539 def _constructmanifest(self):
540 # This is a temporary function while we migrate from manifest to
540 # This is a temporary function while we migrate from manifest to
541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
541 # manifestlog. It allows bundlerepo and unionrepo to intercept the
542 # manifest creation.
542 # manifest creation.
543 return manifest.manifestrevlog(self.svfs)
543 return manifest.manifestrevlog(self.svfs)
544
544
545 @storecache('00manifest.i')
545 @storecache('00manifest.i')
546 def manifestlog(self):
546 def manifestlog(self):
547 return manifest.manifestlog(self.svfs, self)
547 return manifest.manifestlog(self.svfs, self)
548
548
549 @repofilecache('dirstate')
549 @repofilecache('dirstate')
550 def dirstate(self):
550 def dirstate(self):
551 return dirstate.dirstate(self.vfs, self.ui, self.root,
551 return dirstate.dirstate(self.vfs, self.ui, self.root,
552 self._dirstatevalidate)
552 self._dirstatevalidate)
553
553
554 def _dirstatevalidate(self, node):
554 def _dirstatevalidate(self, node):
555 try:
555 try:
556 self.changelog.rev(node)
556 self.changelog.rev(node)
557 return node
557 return node
558 except error.LookupError:
558 except error.LookupError:
559 if not self._dirstatevalidatewarned:
559 if not self._dirstatevalidatewarned:
560 self._dirstatevalidatewarned = True
560 self._dirstatevalidatewarned = True
561 self.ui.warn(_("warning: ignoring unknown"
561 self.ui.warn(_("warning: ignoring unknown"
562 " working parent %s!\n") % short(node))
562 " working parent %s!\n") % short(node))
563 return nullid
563 return nullid
564
564
565 def __getitem__(self, changeid):
565 def __getitem__(self, changeid):
566 if changeid is None or changeid == wdirrev:
566 if changeid is None or changeid == wdirrev:
567 return context.workingctx(self)
567 return context.workingctx(self)
568 if isinstance(changeid, slice):
568 if isinstance(changeid, slice):
569 return [context.changectx(self, i)
569 return [context.changectx(self, i)
570 for i in xrange(*changeid.indices(len(self)))
570 for i in xrange(*changeid.indices(len(self)))
571 if i not in self.changelog.filteredrevs]
571 if i not in self.changelog.filteredrevs]
572 return context.changectx(self, changeid)
572 return context.changectx(self, changeid)
573
573
574 def __contains__(self, changeid):
574 def __contains__(self, changeid):
575 try:
575 try:
576 self[changeid]
576 self[changeid]
577 return True
577 return True
578 except error.RepoLookupError:
578 except error.RepoLookupError:
579 return False
579 return False
580
580
581 def __nonzero__(self):
581 def __nonzero__(self):
582 return True
582 return True
583
583
584 def __len__(self):
584 def __len__(self):
585 return len(self.changelog)
585 return len(self.changelog)
586
586
587 def __iter__(self):
587 def __iter__(self):
588 return iter(self.changelog)
588 return iter(self.changelog)
589
589
590 def revs(self, expr, *args):
590 def revs(self, expr, *args):
591 '''Find revisions matching a revset.
591 '''Find revisions matching a revset.
592
592
593 The revset is specified as a string ``expr`` that may contain
593 The revset is specified as a string ``expr`` that may contain
594 %-formatting to escape certain types. See ``revsetlang.formatspec``.
594 %-formatting to escape certain types. See ``revsetlang.formatspec``.
595
595
596 Revset aliases from the configuration are not expanded. To expand
596 Revset aliases from the configuration are not expanded. To expand
597 user aliases, consider calling ``scmutil.revrange()`` or
597 user aliases, consider calling ``scmutil.revrange()`` or
598 ``repo.anyrevs([expr], user=True)``.
598 ``repo.anyrevs([expr], user=True)``.
599
599
600 Returns a revset.abstractsmartset, which is a list-like interface
600 Returns a revset.abstractsmartset, which is a list-like interface
601 that contains integer revisions.
601 that contains integer revisions.
602 '''
602 '''
603 expr = revsetlang.formatspec(expr, *args)
603 expr = revsetlang.formatspec(expr, *args)
604 m = revset.match(None, expr)
604 m = revset.match(None, expr)
605 return m(self)
605 return m(self)
606
606
607 def set(self, expr, *args):
607 def set(self, expr, *args):
608 '''Find revisions matching a revset and emit changectx instances.
608 '''Find revisions matching a revset and emit changectx instances.
609
609
610 This is a convenience wrapper around ``revs()`` that iterates the
610 This is a convenience wrapper around ``revs()`` that iterates the
611 result and is a generator of changectx instances.
611 result and is a generator of changectx instances.
612
612
613 Revset aliases from the configuration are not expanded. To expand
613 Revset aliases from the configuration are not expanded. To expand
614 user aliases, consider calling ``scmutil.revrange()``.
614 user aliases, consider calling ``scmutil.revrange()``.
615 '''
615 '''
616 for r in self.revs(expr, *args):
616 for r in self.revs(expr, *args):
617 yield self[r]
617 yield self[r]
618
618
619 def anyrevs(self, specs, user=False):
619 def anyrevs(self, specs, user=False):
620 '''Find revisions matching one of the given revsets.
620 '''Find revisions matching one of the given revsets.
621
621
622 Revset aliases from the configuration are not expanded by default. To
622 Revset aliases from the configuration are not expanded by default. To
623 expand user aliases, specify ``user=True``.
623 expand user aliases, specify ``user=True``.
624 '''
624 '''
625 if user:
625 if user:
626 m = revset.matchany(self.ui, specs, repo=self)
626 m = revset.matchany(self.ui, specs, repo=self)
627 else:
627 else:
628 m = revset.matchany(None, specs)
628 m = revset.matchany(None, specs)
629 return m(self)
629 return m(self)
630
630
631 def url(self):
631 def url(self):
632 return 'file:' + self.root
632 return 'file:' + self.root
633
633
634 def hook(self, name, throw=False, **args):
634 def hook(self, name, throw=False, **args):
635 """Call a hook, passing this repo instance.
635 """Call a hook, passing this repo instance.
636
636
637 This a convenience method to aid invoking hooks. Extensions likely
637 This a convenience method to aid invoking hooks. Extensions likely
638 won't call this unless they have registered a custom hook or are
638 won't call this unless they have registered a custom hook or are
639 replacing code that is expected to call a hook.
639 replacing code that is expected to call a hook.
640 """
640 """
641 return hook.hook(self.ui, self, name, throw, **args)
641 return hook.hook(self.ui, self, name, throw, **args)
642
642
643 @unfilteredmethod
643 @unfilteredmethod
644 def _tag(self, names, node, message, local, user, date, extra=None,
644 def _tag(self, names, node, message, local, user, date, extra=None,
645 editor=False):
645 editor=False):
646 if isinstance(names, str):
646 if isinstance(names, str):
647 names = (names,)
647 names = (names,)
648
648
649 branches = self.branchmap()
649 branches = self.branchmap()
650 for name in names:
650 for name in names:
651 self.hook('pretag', throw=True, node=hex(node), tag=name,
651 self.hook('pretag', throw=True, node=hex(node), tag=name,
652 local=local)
652 local=local)
653 if name in branches:
653 if name in branches:
654 self.ui.warn(_("warning: tag %s conflicts with existing"
654 self.ui.warn(_("warning: tag %s conflicts with existing"
655 " branch name\n") % name)
655 " branch name\n") % name)
656
656
657 def writetags(fp, names, munge, prevtags):
657 def writetags(fp, names, munge, prevtags):
658 fp.seek(0, 2)
658 fp.seek(0, 2)
659 if prevtags and prevtags[-1] != '\n':
659 if prevtags and prevtags[-1] != '\n':
660 fp.write('\n')
660 fp.write('\n')
661 for name in names:
661 for name in names:
662 if munge:
662 if munge:
663 m = munge(name)
663 m = munge(name)
664 else:
664 else:
665 m = name
665 m = name
666
666
667 if (self._tagscache.tagtypes and
667 if (self._tagscache.tagtypes and
668 name in self._tagscache.tagtypes):
668 name in self._tagscache.tagtypes):
669 old = self.tags().get(name, nullid)
669 old = self.tags().get(name, nullid)
670 fp.write('%s %s\n' % (hex(old), m))
670 fp.write('%s %s\n' % (hex(old), m))
671 fp.write('%s %s\n' % (hex(node), m))
671 fp.write('%s %s\n' % (hex(node), m))
672 fp.close()
672 fp.close()
673
673
674 prevtags = ''
674 prevtags = ''
675 if local:
675 if local:
676 try:
676 try:
677 fp = self.vfs('localtags', 'r+')
677 fp = self.vfs('localtags', 'r+')
678 except IOError:
678 except IOError:
679 fp = self.vfs('localtags', 'a')
679 fp = self.vfs('localtags', 'a')
680 else:
680 else:
681 prevtags = fp.read()
681 prevtags = fp.read()
682
682
683 # local tags are stored in the current charset
683 # local tags are stored in the current charset
684 writetags(fp, names, None, prevtags)
684 writetags(fp, names, None, prevtags)
685 for name in names:
685 for name in names:
686 self.hook('tag', node=hex(node), tag=name, local=local)
686 self.hook('tag', node=hex(node), tag=name, local=local)
687 return
687 return
688
688
689 try:
689 try:
690 fp = self.wfile('.hgtags', 'rb+')
690 fp = self.wfile('.hgtags', 'rb+')
691 except IOError as e:
691 except IOError as e:
692 if e.errno != errno.ENOENT:
692 if e.errno != errno.ENOENT:
693 raise
693 raise
694 fp = self.wfile('.hgtags', 'ab')
694 fp = self.wfile('.hgtags', 'ab')
695 else:
695 else:
696 prevtags = fp.read()
696 prevtags = fp.read()
697
697
698 # committed tags are stored in UTF-8
698 # committed tags are stored in UTF-8
699 writetags(fp, names, encoding.fromlocal, prevtags)
699 writetags(fp, names, encoding.fromlocal, prevtags)
700
700
701 fp.close()
701 fp.close()
702
702
703 self.invalidatecaches()
703 self.invalidatecaches()
704
704
705 if '.hgtags' not in self.dirstate:
705 if '.hgtags' not in self.dirstate:
706 self[None].add(['.hgtags'])
706 self[None].add(['.hgtags'])
707
707
708 m = matchmod.exact(self.root, '', ['.hgtags'])
708 m = matchmod.exact(self.root, '', ['.hgtags'])
709 tagnode = self.commit(message, user, date, extra=extra, match=m,
709 tagnode = self.commit(message, user, date, extra=extra, match=m,
710 editor=editor)
710 editor=editor)
711
711
712 for name in names:
712 for name in names:
713 self.hook('tag', node=hex(node), tag=name, local=local)
713 self.hook('tag', node=hex(node), tag=name, local=local)
714
714
715 return tagnode
715 return tagnode
716
716
717 def tag(self, names, node, message, local, user, date, editor=False):
717 def tag(self, names, node, message, local, user, date, editor=False):
718 '''tag a revision with one or more symbolic names.
718 '''tag a revision with one or more symbolic names.
719
719
720 names is a list of strings or, when adding a single tag, names may be a
720 names is a list of strings or, when adding a single tag, names may be a
721 string.
721 string.
722
722
723 if local is True, the tags are stored in a per-repository file.
723 if local is True, the tags are stored in a per-repository file.
724 otherwise, they are stored in the .hgtags file, and a new
724 otherwise, they are stored in the .hgtags file, and a new
725 changeset is committed with the change.
725 changeset is committed with the change.
726
726
727 keyword arguments:
727 keyword arguments:
728
728
729 local: whether to store tags in non-version-controlled file
729 local: whether to store tags in non-version-controlled file
730 (default False)
730 (default False)
731
731
732 message: commit message to use if committing
732 message: commit message to use if committing
733
733
734 user: name of user to use if committing
734 user: name of user to use if committing
735
735
736 date: date tuple to use if committing'''
736 date: date tuple to use if committing'''
737
737
738 if not local:
738 if not local:
739 m = matchmod.exact(self.root, '', ['.hgtags'])
739 m = matchmod.exact(self.root, '', ['.hgtags'])
740 if any(self.status(match=m, unknown=True, ignored=True)):
740 if any(self.status(match=m, unknown=True, ignored=True)):
741 raise error.Abort(_('working copy of .hgtags is changed'),
741 raise error.Abort(_('working copy of .hgtags is changed'),
742 hint=_('please commit .hgtags manually'))
742 hint=_('please commit .hgtags manually'))
743
743
744 self.tags() # instantiate the cache
744 self.tags() # instantiate the cache
745 self._tag(names, node, message, local, user, date, editor=editor)
745 self._tag(names, node, message, local, user, date, editor=editor)
746
746
747 @filteredpropertycache
747 @filteredpropertycache
748 def _tagscache(self):
748 def _tagscache(self):
749 '''Returns a tagscache object that contains various tags related
749 '''Returns a tagscache object that contains various tags related
750 caches.'''
750 caches.'''
751
751
752 # This simplifies its cache management by having one decorated
752 # This simplifies its cache management by having one decorated
753 # function (this one) and the rest simply fetch things from it.
753 # function (this one) and the rest simply fetch things from it.
754 class tagscache(object):
754 class tagscache(object):
755 def __init__(self):
755 def __init__(self):
756 # These two define the set of tags for this repository. tags
756 # These two define the set of tags for this repository. tags
757 # maps tag name to node; tagtypes maps tag name to 'global' or
757 # maps tag name to node; tagtypes maps tag name to 'global' or
758 # 'local'. (Global tags are defined by .hgtags across all
758 # 'local'. (Global tags are defined by .hgtags across all
759 # heads, and local tags are defined in .hg/localtags.)
759 # heads, and local tags are defined in .hg/localtags.)
760 # They constitute the in-memory cache of tags.
760 # They constitute the in-memory cache of tags.
761 self.tags = self.tagtypes = None
761 self.tags = self.tagtypes = None
762
762
763 self.nodetagscache = self.tagslist = None
763 self.nodetagscache = self.tagslist = None
764
764
765 cache = tagscache()
765 cache = tagscache()
766 cache.tags, cache.tagtypes = self._findtags()
766 cache.tags, cache.tagtypes = self._findtags()
767
767
768 return cache
768 return cache
769
769
770 def tags(self):
770 def tags(self):
771 '''return a mapping of tag to node'''
771 '''return a mapping of tag to node'''
772 t = {}
772 t = {}
773 if self.changelog.filteredrevs:
773 if self.changelog.filteredrevs:
774 tags, tt = self._findtags()
774 tags, tt = self._findtags()
775 else:
775 else:
776 tags = self._tagscache.tags
776 tags = self._tagscache.tags
777 for k, v in tags.iteritems():
777 for k, v in tags.iteritems():
778 try:
778 try:
779 # ignore tags to unknown nodes
779 # ignore tags to unknown nodes
780 self.changelog.rev(v)
780 self.changelog.rev(v)
781 t[k] = v
781 t[k] = v
782 except (error.LookupError, ValueError):
782 except (error.LookupError, ValueError):
783 pass
783 pass
784 return t
784 return t
785
785
786 def _findtags(self):
786 def _findtags(self):
787 '''Do the hard work of finding tags. Return a pair of dicts
787 '''Do the hard work of finding tags. Return a pair of dicts
788 (tags, tagtypes) where tags maps tag name to node, and tagtypes
788 (tags, tagtypes) where tags maps tag name to node, and tagtypes
789 maps tag name to a string like \'global\' or \'local\'.
789 maps tag name to a string like \'global\' or \'local\'.
790 Subclasses or extensions are free to add their own tags, but
790 Subclasses or extensions are free to add their own tags, but
791 should be aware that the returned dicts will be retained for the
791 should be aware that the returned dicts will be retained for the
792 duration of the localrepo object.'''
792 duration of the localrepo object.'''
793
793
794 # XXX what tagtype should subclasses/extensions use? Currently
794 # XXX what tagtype should subclasses/extensions use? Currently
795 # mq and bookmarks add tags, but do not set the tagtype at all.
795 # mq and bookmarks add tags, but do not set the tagtype at all.
796 # Should each extension invent its own tag type? Should there
796 # Should each extension invent its own tag type? Should there
797 # be one tagtype for all such "virtual" tags? Or is the status
797 # be one tagtype for all such "virtual" tags? Or is the status
798 # quo fine?
798 # quo fine?
799
799
800 alltags = {} # map tag name to (node, hist)
800 alltags = {} # map tag name to (node, hist)
801 tagtypes = {}
801 tagtypes = {}
802
802
803 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
803 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
804 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
804 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
805
805
806 # Build the return dicts. Have to re-encode tag names because
806 # Build the return dicts. Have to re-encode tag names because
807 # the tags module always uses UTF-8 (in order not to lose info
807 # the tags module always uses UTF-8 (in order not to lose info
808 # writing to the cache), but the rest of Mercurial wants them in
808 # writing to the cache), but the rest of Mercurial wants them in
809 # local encoding.
809 # local encoding.
810 tags = {}
810 tags = {}
811 for (name, (node, hist)) in alltags.iteritems():
811 for (name, (node, hist)) in alltags.iteritems():
812 if node != nullid:
812 if node != nullid:
813 tags[encoding.tolocal(name)] = node
813 tags[encoding.tolocal(name)] = node
814 tags['tip'] = self.changelog.tip()
814 tags['tip'] = self.changelog.tip()
815 tagtypes = dict([(encoding.tolocal(name), value)
815 tagtypes = dict([(encoding.tolocal(name), value)
816 for (name, value) in tagtypes.iteritems()])
816 for (name, value) in tagtypes.iteritems()])
817 return (tags, tagtypes)
817 return (tags, tagtypes)
818
818
819 def tagtype(self, tagname):
819 def tagtype(self, tagname):
820 '''
820 '''
821 return the type of the given tag. result can be:
821 return the type of the given tag. result can be:
822
822
823 'local' : a local tag
823 'local' : a local tag
824 'global' : a global tag
824 'global' : a global tag
825 None : tag does not exist
825 None : tag does not exist
826 '''
826 '''
827
827
828 return self._tagscache.tagtypes.get(tagname)
828 return self._tagscache.tagtypes.get(tagname)
829
829
830 def tagslist(self):
830 def tagslist(self):
831 '''return a list of tags ordered by revision'''
831 '''return a list of tags ordered by revision'''
832 if not self._tagscache.tagslist:
832 if not self._tagscache.tagslist:
833 l = []
833 l = []
834 for t, n in self.tags().iteritems():
834 for t, n in self.tags().iteritems():
835 l.append((self.changelog.rev(n), t, n))
835 l.append((self.changelog.rev(n), t, n))
836 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
836 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
837
837
838 return self._tagscache.tagslist
838 return self._tagscache.tagslist
839
839
840 def nodetags(self, node):
840 def nodetags(self, node):
841 '''return the tags associated with a node'''
841 '''return the tags associated with a node'''
842 if not self._tagscache.nodetagscache:
842 if not self._tagscache.nodetagscache:
843 nodetagscache = {}
843 nodetagscache = {}
844 for t, n in self._tagscache.tags.iteritems():
844 for t, n in self._tagscache.tags.iteritems():
845 nodetagscache.setdefault(n, []).append(t)
845 nodetagscache.setdefault(n, []).append(t)
846 for tags in nodetagscache.itervalues():
846 for tags in nodetagscache.itervalues():
847 tags.sort()
847 tags.sort()
848 self._tagscache.nodetagscache = nodetagscache
848 self._tagscache.nodetagscache = nodetagscache
849 return self._tagscache.nodetagscache.get(node, [])
849 return self._tagscache.nodetagscache.get(node, [])
850
850
851 def nodebookmarks(self, node):
851 def nodebookmarks(self, node):
852 """return the list of bookmarks pointing to the specified node"""
852 """return the list of bookmarks pointing to the specified node"""
853 marks = []
853 marks = []
854 for bookmark, n in self._bookmarks.iteritems():
854 for bookmark, n in self._bookmarks.iteritems():
855 if n == node:
855 if n == node:
856 marks.append(bookmark)
856 marks.append(bookmark)
857 return sorted(marks)
857 return sorted(marks)
858
858
859 def branchmap(self):
859 def branchmap(self):
860 '''returns a dictionary {branch: [branchheads]} with branchheads
860 '''returns a dictionary {branch: [branchheads]} with branchheads
861 ordered by increasing revision number'''
861 ordered by increasing revision number'''
862 branchmap.updatecache(self)
862 branchmap.updatecache(self)
863 return self._branchcaches[self.filtername]
863 return self._branchcaches[self.filtername]
864
864
865 @unfilteredmethod
865 @unfilteredmethod
866 def revbranchcache(self):
866 def revbranchcache(self):
867 if not self._revbranchcache:
867 if not self._revbranchcache:
868 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
868 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
869 return self._revbranchcache
869 return self._revbranchcache
870
870
871 def branchtip(self, branch, ignoremissing=False):
871 def branchtip(self, branch, ignoremissing=False):
872 '''return the tip node for a given branch
872 '''return the tip node for a given branch
873
873
874 If ignoremissing is True, then this method will not raise an error.
874 If ignoremissing is True, then this method will not raise an error.
875 This is helpful for callers that only expect None for a missing branch
875 This is helpful for callers that only expect None for a missing branch
876 (e.g. namespace).
876 (e.g. namespace).
877
877
878 '''
878 '''
879 try:
879 try:
880 return self.branchmap().branchtip(branch)
880 return self.branchmap().branchtip(branch)
881 except KeyError:
881 except KeyError:
882 if not ignoremissing:
882 if not ignoremissing:
883 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
883 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
884 else:
884 else:
885 pass
885 pass
886
886
887 def lookup(self, key):
887 def lookup(self, key):
888 return self[key].node()
888 return self[key].node()
889
889
890 def lookupbranch(self, key, remote=None):
890 def lookupbranch(self, key, remote=None):
891 repo = remote or self
891 repo = remote or self
892 if key in repo.branchmap():
892 if key in repo.branchmap():
893 return key
893 return key
894
894
895 repo = (remote and remote.local()) and remote or self
895 repo = (remote and remote.local()) and remote or self
896 return repo[key].branch()
896 return repo[key].branch()
897
897
898 def known(self, nodes):
898 def known(self, nodes):
899 cl = self.changelog
899 cl = self.changelog
900 nm = cl.nodemap
900 nm = cl.nodemap
901 filtered = cl.filteredrevs
901 filtered = cl.filteredrevs
902 result = []
902 result = []
903 for n in nodes:
903 for n in nodes:
904 r = nm.get(n)
904 r = nm.get(n)
905 resp = not (r is None or r in filtered)
905 resp = not (r is None or r in filtered)
906 result.append(resp)
906 result.append(resp)
907 return result
907 return result
908
908
909 def local(self):
909 def local(self):
910 return self
910 return self
911
911
912 def publishing(self):
912 def publishing(self):
913 # it's safe (and desirable) to trust the publish flag unconditionally
913 # it's safe (and desirable) to trust the publish flag unconditionally
914 # so that we don't finalize changes shared between users via ssh or nfs
914 # so that we don't finalize changes shared between users via ssh or nfs
915 return self.ui.configbool('phases', 'publish', True, untrusted=True)
915 return self.ui.configbool('phases', 'publish', True, untrusted=True)
916
916
917 def cancopy(self):
917 def cancopy(self):
918 # so statichttprepo's override of local() works
918 # so statichttprepo's override of local() works
919 if not self.local():
919 if not self.local():
920 return False
920 return False
921 if not self.publishing():
921 if not self.publishing():
922 return True
922 return True
923 # if publishing we can't copy if there is filtered content
923 # if publishing we can't copy if there is filtered content
924 return not self.filtered('visible').changelog.filteredrevs
924 return not self.filtered('visible').changelog.filteredrevs
925
925
926 def shared(self):
926 def shared(self):
927 '''the type of shared repository (None if not shared)'''
927 '''the type of shared repository (None if not shared)'''
928 if self.sharedpath != self.path:
928 if self.sharedpath != self.path:
929 return 'store'
929 return 'store'
930 return None
930 return None
931
931
932 def join(self, f, *insidef):
932 def join(self, f, *insidef):
933 return self.vfs.join(os.path.join(f, *insidef))
933 return self.vfs.join(os.path.join(f, *insidef))
934
934
935 def wjoin(self, f, *insidef):
935 def wjoin(self, f, *insidef):
936 return self.vfs.reljoin(self.root, f, *insidef)
936 return self.vfs.reljoin(self.root, f, *insidef)
937
937
938 def file(self, f):
938 def file(self, f):
939 if f[0] == '/':
939 if f[0] == '/':
940 f = f[1:]
940 f = f[1:]
941 return filelog.filelog(self.svfs, f)
941 return filelog.filelog(self.svfs, f)
942
942
943 def changectx(self, changeid):
943 def changectx(self, changeid):
944 return self[changeid]
944 return self[changeid]
945
945
946 def setparents(self, p1, p2=nullid):
946 def setparents(self, p1, p2=nullid):
947 self.dirstate.beginparentchange()
947 self.dirstate.beginparentchange()
948 copies = self.dirstate.setparents(p1, p2)
948 copies = self.dirstate.setparents(p1, p2)
949 pctx = self[p1]
949 pctx = self[p1]
950 if copies:
950 if copies:
951 # Adjust copy records, the dirstate cannot do it, it
951 # Adjust copy records, the dirstate cannot do it, it
952 # requires access to parents manifests. Preserve them
952 # requires access to parents manifests. Preserve them
953 # only for entries added to first parent.
953 # only for entries added to first parent.
954 for f in copies:
954 for f in copies:
955 if f not in pctx and copies[f] in pctx:
955 if f not in pctx and copies[f] in pctx:
956 self.dirstate.copy(copies[f], f)
956 self.dirstate.copy(copies[f], f)
957 if p2 == nullid:
957 if p2 == nullid:
958 for f, s in sorted(self.dirstate.copies().items()):
958 for f, s in sorted(self.dirstate.copies().items()):
959 if f not in pctx and s not in pctx:
959 if f not in pctx and s not in pctx:
960 self.dirstate.copy(None, f)
960 self.dirstate.copy(None, f)
961 self.dirstate.endparentchange()
961 self.dirstate.endparentchange()
962
962
963 def filectx(self, path, changeid=None, fileid=None):
963 def filectx(self, path, changeid=None, fileid=None):
964 """changeid can be a changeset revision, node, or tag.
964 """changeid can be a changeset revision, node, or tag.
965 fileid can be a file revision or node."""
965 fileid can be a file revision or node."""
966 return context.filectx(self, path, changeid, fileid)
966 return context.filectx(self, path, changeid, fileid)
967
967
968 def getcwd(self):
968 def getcwd(self):
969 return self.dirstate.getcwd()
969 return self.dirstate.getcwd()
970
970
971 def pathto(self, f, cwd=None):
971 def pathto(self, f, cwd=None):
972 return self.dirstate.pathto(f, cwd)
972 return self.dirstate.pathto(f, cwd)
973
973
974 def wfile(self, f, mode='r'):
974 def wfile(self, f, mode='r'):
975 return self.wvfs(f, mode)
975 return self.wvfs(f, mode)
976
976
977 def _link(self, f):
977 def _link(self, f):
978 return self.wvfs.islink(f)
978 return self.wvfs.islink(f)
979
979
980 def _loadfilter(self, filter):
980 def _loadfilter(self, filter):
981 if filter not in self.filterpats:
981 if filter not in self.filterpats:
982 l = []
982 l = []
983 for pat, cmd in self.ui.configitems(filter):
983 for pat, cmd in self.ui.configitems(filter):
984 if cmd == '!':
984 if cmd == '!':
985 continue
985 continue
986 mf = matchmod.match(self.root, '', [pat])
986 mf = matchmod.match(self.root, '', [pat])
987 fn = None
987 fn = None
988 params = cmd
988 params = cmd
989 for name, filterfn in self._datafilters.iteritems():
989 for name, filterfn in self._datafilters.iteritems():
990 if cmd.startswith(name):
990 if cmd.startswith(name):
991 fn = filterfn
991 fn = filterfn
992 params = cmd[len(name):].lstrip()
992 params = cmd[len(name):].lstrip()
993 break
993 break
994 if not fn:
994 if not fn:
995 fn = lambda s, c, **kwargs: util.filter(s, c)
995 fn = lambda s, c, **kwargs: util.filter(s, c)
996 # Wrap old filters not supporting keyword arguments
996 # Wrap old filters not supporting keyword arguments
997 if not inspect.getargspec(fn)[2]:
997 if not inspect.getargspec(fn)[2]:
998 oldfn = fn
998 oldfn = fn
999 fn = lambda s, c, **kwargs: oldfn(s, c)
999 fn = lambda s, c, **kwargs: oldfn(s, c)
1000 l.append((mf, fn, params))
1000 l.append((mf, fn, params))
1001 self.filterpats[filter] = l
1001 self.filterpats[filter] = l
1002 return self.filterpats[filter]
1002 return self.filterpats[filter]
1003
1003
1004 def _filter(self, filterpats, filename, data):
1004 def _filter(self, filterpats, filename, data):
1005 for mf, fn, cmd in filterpats:
1005 for mf, fn, cmd in filterpats:
1006 if mf(filename):
1006 if mf(filename):
1007 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1007 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1008 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1008 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1009 break
1009 break
1010
1010
1011 return data
1011 return data
1012
1012
1013 @unfilteredpropertycache
1013 @unfilteredpropertycache
1014 def _encodefilterpats(self):
1014 def _encodefilterpats(self):
1015 return self._loadfilter('encode')
1015 return self._loadfilter('encode')
1016
1016
1017 @unfilteredpropertycache
1017 @unfilteredpropertycache
1018 def _decodefilterpats(self):
1018 def _decodefilterpats(self):
1019 return self._loadfilter('decode')
1019 return self._loadfilter('decode')
1020
1020
1021 def adddatafilter(self, name, filter):
1021 def adddatafilter(self, name, filter):
1022 self._datafilters[name] = filter
1022 self._datafilters[name] = filter
1023
1023
1024 def wread(self, filename):
1024 def wread(self, filename):
1025 if self._link(filename):
1025 if self._link(filename):
1026 data = self.wvfs.readlink(filename)
1026 data = self.wvfs.readlink(filename)
1027 else:
1027 else:
1028 data = self.wvfs.read(filename)
1028 data = self.wvfs.read(filename)
1029 return self._filter(self._encodefilterpats, filename, data)
1029 return self._filter(self._encodefilterpats, filename, data)
1030
1030
1031 def wwrite(self, filename, data, flags, backgroundclose=False):
1031 def wwrite(self, filename, data, flags, backgroundclose=False):
1032 """write ``data`` into ``filename`` in the working directory
1032 """write ``data`` into ``filename`` in the working directory
1033
1033
1034 This returns length of written (maybe decoded) data.
1034 This returns length of written (maybe decoded) data.
1035 """
1035 """
1036 data = self._filter(self._decodefilterpats, filename, data)
1036 data = self._filter(self._decodefilterpats, filename, data)
1037 if 'l' in flags:
1037 if 'l' in flags:
1038 self.wvfs.symlink(data, filename)
1038 self.wvfs.symlink(data, filename)
1039 else:
1039 else:
1040 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1040 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1041 if 'x' in flags:
1041 if 'x' in flags:
1042 self.wvfs.setflags(filename, False, True)
1042 self.wvfs.setflags(filename, False, True)
1043 return len(data)
1043 return len(data)
1044
1044
1045 def wwritedata(self, filename, data):
1045 def wwritedata(self, filename, data):
1046 return self._filter(self._decodefilterpats, filename, data)
1046 return self._filter(self._decodefilterpats, filename, data)
1047
1047
1048 def currenttransaction(self):
1048 def currenttransaction(self):
1049 """return the current transaction or None if non exists"""
1049 """return the current transaction or None if non exists"""
1050 if self._transref:
1050 if self._transref:
1051 tr = self._transref()
1051 tr = self._transref()
1052 else:
1052 else:
1053 tr = None
1053 tr = None
1054
1054
1055 if tr and tr.running():
1055 if tr and tr.running():
1056 return tr
1056 return tr
1057 return None
1057 return None
1058
1058
1059 def transaction(self, desc, report=None):
1059 def transaction(self, desc, report=None):
1060 if (self.ui.configbool('devel', 'all-warnings')
1060 if (self.ui.configbool('devel', 'all-warnings')
1061 or self.ui.configbool('devel', 'check-locks')):
1061 or self.ui.configbool('devel', 'check-locks')):
1062 if self._currentlock(self._lockref) is None:
1062 if self._currentlock(self._lockref) is None:
1063 raise error.ProgrammingError('transaction requires locking')
1063 raise error.ProgrammingError('transaction requires locking')
1064 tr = self.currenttransaction()
1064 tr = self.currenttransaction()
1065 if tr is not None:
1065 if tr is not None:
1066 return tr.nest()
1066 return tr.nest()
1067
1067
1068 # abort here if the journal already exists
1068 # abort here if the journal already exists
1069 if self.svfs.exists("journal"):
1069 if self.svfs.exists("journal"):
1070 raise error.RepoError(
1070 raise error.RepoError(
1071 _("abandoned transaction found"),
1071 _("abandoned transaction found"),
1072 hint=_("run 'hg recover' to clean up transaction"))
1072 hint=_("run 'hg recover' to clean up transaction"))
1073
1073
1074 idbase = "%.40f#%f" % (random.random(), time.time())
1074 idbase = "%.40f#%f" % (random.random(), time.time())
1075 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1075 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1076 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1076 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1077
1077
1078 self._writejournal(desc)
1078 self._writejournal(desc)
1079 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1079 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1080 if report:
1080 if report:
1081 rp = report
1081 rp = report
1082 else:
1082 else:
1083 rp = self.ui.warn
1083 rp = self.ui.warn
1084 vfsmap = {'plain': self.vfs} # root of .hg/
1084 vfsmap = {'plain': self.vfs} # root of .hg/
1085 # we must avoid cyclic reference between repo and transaction.
1085 # we must avoid cyclic reference between repo and transaction.
1086 reporef = weakref.ref(self)
1086 reporef = weakref.ref(self)
1087 def validate(tr):
1087 def validate(tr):
1088 """will run pre-closing hooks"""
1088 """will run pre-closing hooks"""
1089 reporef().hook('pretxnclose', throw=True,
1089 reporef().hook('pretxnclose', throw=True,
1090 txnname=desc, **tr.hookargs)
1090 txnname=desc, **tr.hookargs)
1091 def releasefn(tr, success):
1091 def releasefn(tr, success):
1092 repo = reporef()
1092 repo = reporef()
1093 if success:
1093 if success:
1094 # this should be explicitly invoked here, because
1094 # this should be explicitly invoked here, because
1095 # in-memory changes aren't written out at closing
1095 # in-memory changes aren't written out at closing
1096 # transaction, if tr.addfilegenerator (via
1096 # transaction, if tr.addfilegenerator (via
1097 # dirstate.write or so) isn't invoked while
1097 # dirstate.write or so) isn't invoked while
1098 # transaction running
1098 # transaction running
1099 repo.dirstate.write(None)
1099 repo.dirstate.write(None)
1100 else:
1100 else:
1101 # discard all changes (including ones already written
1101 # discard all changes (including ones already written
1102 # out) in this transaction
1102 # out) in this transaction
1103 repo.dirstate.restorebackup(None, prefix='journal.')
1103 repo.dirstate.restorebackup(None, prefix='journal.')
1104
1104
1105 repo.invalidate(clearfilecache=True)
1105 repo.invalidate(clearfilecache=True)
1106
1106
1107 tr = transaction.transaction(rp, self.svfs, vfsmap,
1107 tr = transaction.transaction(rp, self.svfs, vfsmap,
1108 "journal",
1108 "journal",
1109 "undo",
1109 "undo",
1110 aftertrans(renames),
1110 aftertrans(renames),
1111 self.store.createmode,
1111 self.store.createmode,
1112 validator=validate,
1112 validator=validate,
1113 releasefn=releasefn)
1113 releasefn=releasefn)
1114
1114
1115 tr.hookargs['txnid'] = txnid
1115 tr.hookargs['txnid'] = txnid
1116 # note: writing the fncache only during finalize mean that the file is
1116 # note: writing the fncache only during finalize mean that the file is
1117 # outdated when running hooks. As fncache is used for streaming clone,
1117 # outdated when running hooks. As fncache is used for streaming clone,
1118 # this is not expected to break anything that happen during the hooks.
1118 # this is not expected to break anything that happen during the hooks.
1119 tr.addfinalize('flush-fncache', self.store.write)
1119 tr.addfinalize('flush-fncache', self.store.write)
1120 def txnclosehook(tr2):
1120 def txnclosehook(tr2):
1121 """To be run if transaction is successful, will schedule a hook run
1121 """To be run if transaction is successful, will schedule a hook run
1122 """
1122 """
1123 # Don't reference tr2 in hook() so we don't hold a reference.
1123 # Don't reference tr2 in hook() so we don't hold a reference.
1124 # This reduces memory consumption when there are multiple
1124 # This reduces memory consumption when there are multiple
1125 # transactions per lock. This can likely go away if issue5045
1125 # transactions per lock. This can likely go away if issue5045
1126 # fixes the function accumulation.
1126 # fixes the function accumulation.
1127 hookargs = tr2.hookargs
1127 hookargs = tr2.hookargs
1128
1128
1129 def hook():
1129 def hook():
1130 reporef().hook('txnclose', throw=False, txnname=desc,
1130 reporef().hook('txnclose', throw=False, txnname=desc,
1131 **hookargs)
1131 **hookargs)
1132 reporef()._afterlock(hook)
1132 reporef()._afterlock(hook)
1133 tr.addfinalize('txnclose-hook', txnclosehook)
1133 tr.addfinalize('txnclose-hook', txnclosehook)
1134 def txnaborthook(tr2):
1134 def txnaborthook(tr2):
1135 """To be run if transaction is aborted
1135 """To be run if transaction is aborted
1136 """
1136 """
1137 reporef().hook('txnabort', throw=False, txnname=desc,
1137 reporef().hook('txnabort', throw=False, txnname=desc,
1138 **tr2.hookargs)
1138 **tr2.hookargs)
1139 tr.addabort('txnabort-hook', txnaborthook)
1139 tr.addabort('txnabort-hook', txnaborthook)
1140 # avoid eager cache invalidation. in-memory data should be identical
1140 # avoid eager cache invalidation. in-memory data should be identical
1141 # to stored data if transaction has no error.
1141 # to stored data if transaction has no error.
1142 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1142 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1143 self._transref = weakref.ref(tr)
1143 self._transref = weakref.ref(tr)
1144 return tr
1144 return tr
1145
1145
1146 def _journalfiles(self):
1146 def _journalfiles(self):
1147 return ((self.svfs, 'journal'),
1147 return ((self.svfs, 'journal'),
1148 (self.vfs, 'journal.dirstate'),
1148 (self.vfs, 'journal.dirstate'),
1149 (self.vfs, 'journal.branch'),
1149 (self.vfs, 'journal.branch'),
1150 (self.vfs, 'journal.desc'),
1150 (self.vfs, 'journal.desc'),
1151 (self.vfs, 'journal.bookmarks'),
1151 (self.vfs, 'journal.bookmarks'),
1152 (self.svfs, 'journal.phaseroots'))
1152 (self.svfs, 'journal.phaseroots'))
1153
1153
1154 def undofiles(self):
1154 def undofiles(self):
1155 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1155 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1156
1156
1157 def _writejournal(self, desc):
1157 def _writejournal(self, desc):
1158 self.dirstate.savebackup(None, prefix='journal.')
1158 self.dirstate.savebackup(None, prefix='journal.')
1159 self.vfs.write("journal.branch",
1159 self.vfs.write("journal.branch",
1160 encoding.fromlocal(self.dirstate.branch()))
1160 encoding.fromlocal(self.dirstate.branch()))
1161 self.vfs.write("journal.desc",
1161 self.vfs.write("journal.desc",
1162 "%d\n%s\n" % (len(self), desc))
1162 "%d\n%s\n" % (len(self), desc))
1163 self.vfs.write("journal.bookmarks",
1163 self.vfs.write("journal.bookmarks",
1164 self.vfs.tryread("bookmarks"))
1164 self.vfs.tryread("bookmarks"))
1165 self.svfs.write("journal.phaseroots",
1165 self.svfs.write("journal.phaseroots",
1166 self.svfs.tryread("phaseroots"))
1166 self.svfs.tryread("phaseroots"))
1167
1167
1168 def recover(self):
1168 def recover(self):
1169 with self.lock():
1169 with self.lock():
1170 if self.svfs.exists("journal"):
1170 if self.svfs.exists("journal"):
1171 self.ui.status(_("rolling back interrupted transaction\n"))
1171 self.ui.status(_("rolling back interrupted transaction\n"))
1172 vfsmap = {'': self.svfs,
1172 vfsmap = {'': self.svfs,
1173 'plain': self.vfs,}
1173 'plain': self.vfs,}
1174 transaction.rollback(self.svfs, vfsmap, "journal",
1174 transaction.rollback(self.svfs, vfsmap, "journal",
1175 self.ui.warn)
1175 self.ui.warn)
1176 self.invalidate()
1176 self.invalidate()
1177 return True
1177 return True
1178 else:
1178 else:
1179 self.ui.warn(_("no interrupted transaction available\n"))
1179 self.ui.warn(_("no interrupted transaction available\n"))
1180 return False
1180 return False
1181
1181
1182 def rollback(self, dryrun=False, force=False):
1182 def rollback(self, dryrun=False, force=False):
1183 wlock = lock = dsguard = None
1183 wlock = lock = dsguard = None
1184 try:
1184 try:
1185 wlock = self.wlock()
1185 wlock = self.wlock()
1186 lock = self.lock()
1186 lock = self.lock()
1187 if self.svfs.exists("undo"):
1187 if self.svfs.exists("undo"):
1188 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1188 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1189
1189
1190 return self._rollback(dryrun, force, dsguard)
1190 return self._rollback(dryrun, force, dsguard)
1191 else:
1191 else:
1192 self.ui.warn(_("no rollback information available\n"))
1192 self.ui.warn(_("no rollback information available\n"))
1193 return 1
1193 return 1
1194 finally:
1194 finally:
1195 release(dsguard, lock, wlock)
1195 release(dsguard, lock, wlock)
1196
1196
1197 @unfilteredmethod # Until we get smarter cache management
1197 @unfilteredmethod # Until we get smarter cache management
1198 def _rollback(self, dryrun, force, dsguard):
1198 def _rollback(self, dryrun, force, dsguard):
1199 ui = self.ui
1199 ui = self.ui
1200 try:
1200 try:
1201 args = self.vfs.read('undo.desc').splitlines()
1201 args = self.vfs.read('undo.desc').splitlines()
1202 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1202 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1203 if len(args) >= 3:
1203 if len(args) >= 3:
1204 detail = args[2]
1204 detail = args[2]
1205 oldtip = oldlen - 1
1205 oldtip = oldlen - 1
1206
1206
1207 if detail and ui.verbose:
1207 if detail and ui.verbose:
1208 msg = (_('repository tip rolled back to revision %s'
1208 msg = (_('repository tip rolled back to revision %s'
1209 ' (undo %s: %s)\n')
1209 ' (undo %s: %s)\n')
1210 % (oldtip, desc, detail))
1210 % (oldtip, desc, detail))
1211 else:
1211 else:
1212 msg = (_('repository tip rolled back to revision %s'
1212 msg = (_('repository tip rolled back to revision %s'
1213 ' (undo %s)\n')
1213 ' (undo %s)\n')
1214 % (oldtip, desc))
1214 % (oldtip, desc))
1215 except IOError:
1215 except IOError:
1216 msg = _('rolling back unknown transaction\n')
1216 msg = _('rolling back unknown transaction\n')
1217 desc = None
1217 desc = None
1218
1218
1219 if not force and self['.'] != self['tip'] and desc == 'commit':
1219 if not force and self['.'] != self['tip'] and desc == 'commit':
1220 raise error.Abort(
1220 raise error.Abort(
1221 _('rollback of last commit while not checked out '
1221 _('rollback of last commit while not checked out '
1222 'may lose data'), hint=_('use -f to force'))
1222 'may lose data'), hint=_('use -f to force'))
1223
1223
1224 ui.status(msg)
1224 ui.status(msg)
1225 if dryrun:
1225 if dryrun:
1226 return 0
1226 return 0
1227
1227
1228 parents = self.dirstate.parents()
1228 parents = self.dirstate.parents()
1229 self.destroying()
1229 self.destroying()
1230 vfsmap = {'plain': self.vfs, '': self.svfs}
1230 vfsmap = {'plain': self.vfs, '': self.svfs}
1231 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1231 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1232 if self.vfs.exists('undo.bookmarks'):
1232 if self.vfs.exists('undo.bookmarks'):
1233 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1233 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1234 if self.svfs.exists('undo.phaseroots'):
1234 if self.svfs.exists('undo.phaseroots'):
1235 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1235 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1236 self.invalidate()
1236 self.invalidate()
1237
1237
1238 parentgone = (parents[0] not in self.changelog.nodemap or
1238 parentgone = (parents[0] not in self.changelog.nodemap or
1239 parents[1] not in self.changelog.nodemap)
1239 parents[1] not in self.changelog.nodemap)
1240 if parentgone:
1240 if parentgone:
1241 # prevent dirstateguard from overwriting already restored one
1241 # prevent dirstateguard from overwriting already restored one
1242 dsguard.close()
1242 dsguard.close()
1243
1243
1244 self.dirstate.restorebackup(None, prefix='undo.')
1244 self.dirstate.restorebackup(None, prefix='undo.')
1245 try:
1245 try:
1246 branch = self.vfs.read('undo.branch')
1246 branch = self.vfs.read('undo.branch')
1247 self.dirstate.setbranch(encoding.tolocal(branch))
1247 self.dirstate.setbranch(encoding.tolocal(branch))
1248 except IOError:
1248 except IOError:
1249 ui.warn(_('named branch could not be reset: '
1249 ui.warn(_('named branch could not be reset: '
1250 'current branch is still \'%s\'\n')
1250 'current branch is still \'%s\'\n')
1251 % self.dirstate.branch())
1251 % self.dirstate.branch())
1252
1252
1253 parents = tuple([p.rev() for p in self[None].parents()])
1253 parents = tuple([p.rev() for p in self[None].parents()])
1254 if len(parents) > 1:
1254 if len(parents) > 1:
1255 ui.status(_('working directory now based on '
1255 ui.status(_('working directory now based on '
1256 'revisions %d and %d\n') % parents)
1256 'revisions %d and %d\n') % parents)
1257 else:
1257 else:
1258 ui.status(_('working directory now based on '
1258 ui.status(_('working directory now based on '
1259 'revision %d\n') % parents)
1259 'revision %d\n') % parents)
1260 mergemod.mergestate.clean(self, self['.'].node())
1260 mergemod.mergestate.clean(self, self['.'].node())
1261
1261
1262 # TODO: if we know which new heads may result from this rollback, pass
1262 # TODO: if we know which new heads may result from this rollback, pass
1263 # them to destroy(), which will prevent the branchhead cache from being
1263 # them to destroy(), which will prevent the branchhead cache from being
1264 # invalidated.
1264 # invalidated.
1265 self.destroyed()
1265 self.destroyed()
1266 return 0
1266 return 0
1267
1267
1268 def invalidatecaches(self):
1268 def invalidatecaches(self):
1269
1269
1270 if '_tagscache' in vars(self):
1270 if '_tagscache' in vars(self):
1271 # can't use delattr on proxy
1271 # can't use delattr on proxy
1272 del self.__dict__['_tagscache']
1272 del self.__dict__['_tagscache']
1273
1273
1274 self.unfiltered()._branchcaches.clear()
1274 self.unfiltered()._branchcaches.clear()
1275 self.invalidatevolatilesets()
1275 self.invalidatevolatilesets()
1276
1276
1277 def invalidatevolatilesets(self):
1277 def invalidatevolatilesets(self):
1278 self.filteredrevcache.clear()
1278 self.filteredrevcache.clear()
1279 obsolete.clearobscaches(self)
1279 obsolete.clearobscaches(self)
1280
1280
1281 def invalidatedirstate(self):
1281 def invalidatedirstate(self):
1282 '''Invalidates the dirstate, causing the next call to dirstate
1282 '''Invalidates the dirstate, causing the next call to dirstate
1283 to check if it was modified since the last time it was read,
1283 to check if it was modified since the last time it was read,
1284 rereading it if it has.
1284 rereading it if it has.
1285
1285
1286 This is different to dirstate.invalidate() that it doesn't always
1286 This is different to dirstate.invalidate() that it doesn't always
1287 rereads the dirstate. Use dirstate.invalidate() if you want to
1287 rereads the dirstate. Use dirstate.invalidate() if you want to
1288 explicitly read the dirstate again (i.e. restoring it to a previous
1288 explicitly read the dirstate again (i.e. restoring it to a previous
1289 known good state).'''
1289 known good state).'''
1290 if hasunfilteredcache(self, 'dirstate'):
1290 if hasunfilteredcache(self, 'dirstate'):
1291 for k in self.dirstate._filecache:
1291 for k in self.dirstate._filecache:
1292 try:
1292 try:
1293 delattr(self.dirstate, k)
1293 delattr(self.dirstate, k)
1294 except AttributeError:
1294 except AttributeError:
1295 pass
1295 pass
1296 delattr(self.unfiltered(), 'dirstate')
1296 delattr(self.unfiltered(), 'dirstate')
1297
1297
1298 def invalidate(self, clearfilecache=False):
1298 def invalidate(self, clearfilecache=False):
1299 '''Invalidates both store and non-store parts other than dirstate
1299 '''Invalidates both store and non-store parts other than dirstate
1300
1300
1301 If a transaction is running, invalidation of store is omitted,
1301 If a transaction is running, invalidation of store is omitted,
1302 because discarding in-memory changes might cause inconsistency
1302 because discarding in-memory changes might cause inconsistency
1303 (e.g. incomplete fncache causes unintentional failure, but
1303 (e.g. incomplete fncache causes unintentional failure, but
1304 redundant one doesn't).
1304 redundant one doesn't).
1305 '''
1305 '''
1306 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1306 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1307 for k in self._filecache.keys():
1307 for k in self._filecache.keys():
1308 # dirstate is invalidated separately in invalidatedirstate()
1308 # dirstate is invalidated separately in invalidatedirstate()
1309 if k == 'dirstate':
1309 if k == 'dirstate':
1310 continue
1310 continue
1311
1311
1312 if clearfilecache:
1312 if clearfilecache:
1313 del self._filecache[k]
1313 del self._filecache[k]
1314 try:
1314 try:
1315 delattr(unfiltered, k)
1315 delattr(unfiltered, k)
1316 except AttributeError:
1316 except AttributeError:
1317 pass
1317 pass
1318 self.invalidatecaches()
1318 self.invalidatecaches()
1319 if not self.currenttransaction():
1319 if not self.currenttransaction():
1320 # TODO: Changing contents of store outside transaction
1320 # TODO: Changing contents of store outside transaction
1321 # causes inconsistency. We should make in-memory store
1321 # causes inconsistency. We should make in-memory store
1322 # changes detectable, and abort if changed.
1322 # changes detectable, and abort if changed.
1323 self.store.invalidatecaches()
1323 self.store.invalidatecaches()
1324
1324
1325 def invalidateall(self):
1325 def invalidateall(self):
1326 '''Fully invalidates both store and non-store parts, causing the
1326 '''Fully invalidates both store and non-store parts, causing the
1327 subsequent operation to reread any outside changes.'''
1327 subsequent operation to reread any outside changes.'''
1328 # extension should hook this to invalidate its caches
1328 # extension should hook this to invalidate its caches
1329 self.invalidate()
1329 self.invalidate()
1330 self.invalidatedirstate()
1330 self.invalidatedirstate()
1331
1331
1332 @unfilteredmethod
1332 @unfilteredmethod
1333 def _refreshfilecachestats(self, tr):
1333 def _refreshfilecachestats(self, tr):
1334 """Reload stats of cached files so that they are flagged as valid"""
1334 """Reload stats of cached files so that they are flagged as valid"""
1335 for k, ce in self._filecache.items():
1335 for k, ce in self._filecache.items():
1336 if k == 'dirstate' or k not in self.__dict__:
1336 if k == 'dirstate' or k not in self.__dict__:
1337 continue
1337 continue
1338 ce.refresh()
1338 ce.refresh()
1339
1339
1340 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1340 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1341 inheritchecker=None, parentenvvar=None):
1341 inheritchecker=None, parentenvvar=None):
1342 parentlock = None
1342 parentlock = None
1343 # the contents of parentenvvar are used by the underlying lock to
1343 # the contents of parentenvvar are used by the underlying lock to
1344 # determine whether it can be inherited
1344 # determine whether it can be inherited
1345 if parentenvvar is not None:
1345 if parentenvvar is not None:
1346 parentlock = encoding.environ.get(parentenvvar)
1346 parentlock = encoding.environ.get(parentenvvar)
1347 try:
1347 try:
1348 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1348 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1349 acquirefn=acquirefn, desc=desc,
1349 acquirefn=acquirefn, desc=desc,
1350 inheritchecker=inheritchecker,
1350 inheritchecker=inheritchecker,
1351 parentlock=parentlock)
1351 parentlock=parentlock)
1352 except error.LockHeld as inst:
1352 except error.LockHeld as inst:
1353 if not wait:
1353 if not wait:
1354 raise
1354 raise
1355 # show more details for new-style locks
1355 # show more details for new-style locks
1356 if ':' in inst.locker:
1356 if ':' in inst.locker:
1357 host, pid = inst.locker.split(":", 1)
1357 host, pid = inst.locker.split(":", 1)
1358 self.ui.warn(
1358 self.ui.warn(
1359 _("waiting for lock on %s held by process %r "
1359 _("waiting for lock on %s held by process %r "
1360 "on host %r\n") % (desc, pid, host))
1360 "on host %r\n") % (desc, pid, host))
1361 else:
1361 else:
1362 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1362 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1363 (desc, inst.locker))
1363 (desc, inst.locker))
1364 # default to 600 seconds timeout
1364 # default to 600 seconds timeout
1365 l = lockmod.lock(vfs, lockname,
1365 l = lockmod.lock(vfs, lockname,
1366 int(self.ui.config("ui", "timeout", "600")),
1366 int(self.ui.config("ui", "timeout", "600")),
1367 releasefn=releasefn, acquirefn=acquirefn,
1367 releasefn=releasefn, acquirefn=acquirefn,
1368 desc=desc)
1368 desc=desc)
1369 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1369 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1370 return l
1370 return l
1371
1371
1372 def _afterlock(self, callback):
1372 def _afterlock(self, callback):
1373 """add a callback to be run when the repository is fully unlocked
1373 """add a callback to be run when the repository is fully unlocked
1374
1374
1375 The callback will be executed when the outermost lock is released
1375 The callback will be executed when the outermost lock is released
1376 (with wlock being higher level than 'lock')."""
1376 (with wlock being higher level than 'lock')."""
1377 for ref in (self._wlockref, self._lockref):
1377 for ref in (self._wlockref, self._lockref):
1378 l = ref and ref()
1378 l = ref and ref()
1379 if l and l.held:
1379 if l and l.held:
1380 l.postrelease.append(callback)
1380 l.postrelease.append(callback)
1381 break
1381 break
1382 else: # no lock have been found.
1382 else: # no lock have been found.
1383 callback()
1383 callback()
1384
1384
1385 def lock(self, wait=True):
1385 def lock(self, wait=True):
1386 '''Lock the repository store (.hg/store) and return a weak reference
1386 '''Lock the repository store (.hg/store) and return a weak reference
1387 to the lock. Use this before modifying the store (e.g. committing or
1387 to the lock. Use this before modifying the store (e.g. committing or
1388 stripping). If you are opening a transaction, get a lock as well.)
1388 stripping). If you are opening a transaction, get a lock as well.)
1389
1389
1390 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1390 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1391 'wlock' first to avoid a dead-lock hazard.'''
1391 'wlock' first to avoid a dead-lock hazard.'''
1392 l = self._currentlock(self._lockref)
1392 l = self._currentlock(self._lockref)
1393 if l is not None:
1393 if l is not None:
1394 l.lock()
1394 l.lock()
1395 return l
1395 return l
1396
1396
1397 l = self._lock(self.svfs, "lock", wait, None,
1397 l = self._lock(self.svfs, "lock", wait, None,
1398 self.invalidate, _('repository %s') % self.origroot)
1398 self.invalidate, _('repository %s') % self.origroot)
1399 self._lockref = weakref.ref(l)
1399 self._lockref = weakref.ref(l)
1400 return l
1400 return l
1401
1401
1402 def _wlockchecktransaction(self):
1402 def _wlockchecktransaction(self):
1403 if self.currenttransaction() is not None:
1403 if self.currenttransaction() is not None:
1404 raise error.LockInheritanceContractViolation(
1404 raise error.LockInheritanceContractViolation(
1405 'wlock cannot be inherited in the middle of a transaction')
1405 'wlock cannot be inherited in the middle of a transaction')
1406
1406
1407 def wlock(self, wait=True):
1407 def wlock(self, wait=True):
1408 '''Lock the non-store parts of the repository (everything under
1408 '''Lock the non-store parts of the repository (everything under
1409 .hg except .hg/store) and return a weak reference to the lock.
1409 .hg except .hg/store) and return a weak reference to the lock.
1410
1410
1411 Use this before modifying files in .hg.
1411 Use this before modifying files in .hg.
1412
1412
1413 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1413 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1414 'wlock' first to avoid a dead-lock hazard.'''
1414 'wlock' first to avoid a dead-lock hazard.'''
1415 l = self._wlockref and self._wlockref()
1415 l = self._wlockref and self._wlockref()
1416 if l is not None and l.held:
1416 if l is not None and l.held:
1417 l.lock()
1417 l.lock()
1418 return l
1418 return l
1419
1419
1420 # We do not need to check for non-waiting lock acquisition. Such
1420 # We do not need to check for non-waiting lock acquisition. Such
1421 # acquisition would not cause dead-lock as they would just fail.
1421 # acquisition would not cause dead-lock as they would just fail.
1422 if wait and (self.ui.configbool('devel', 'all-warnings')
1422 if wait and (self.ui.configbool('devel', 'all-warnings')
1423 or self.ui.configbool('devel', 'check-locks')):
1423 or self.ui.configbool('devel', 'check-locks')):
1424 if self._currentlock(self._lockref) is not None:
1424 if self._currentlock(self._lockref) is not None:
1425 self.ui.develwarn('"wlock" acquired after "lock"')
1425 self.ui.develwarn('"wlock" acquired after "lock"')
1426
1426
1427 def unlock():
1427 def unlock():
1428 if self.dirstate.pendingparentchange():
1428 if self.dirstate.pendingparentchange():
1429 self.dirstate.invalidate()
1429 self.dirstate.invalidate()
1430 else:
1430 else:
1431 self.dirstate.write(None)
1431 self.dirstate.write(None)
1432
1432
1433 self._filecache['dirstate'].refresh()
1433 self._filecache['dirstate'].refresh()
1434
1434
1435 l = self._lock(self.vfs, "wlock", wait, unlock,
1435 l = self._lock(self.vfs, "wlock", wait, unlock,
1436 self.invalidatedirstate, _('working directory of %s') %
1436 self.invalidatedirstate, _('working directory of %s') %
1437 self.origroot,
1437 self.origroot,
1438 inheritchecker=self._wlockchecktransaction,
1438 inheritchecker=self._wlockchecktransaction,
1439 parentenvvar='HG_WLOCK_LOCKER')
1439 parentenvvar='HG_WLOCK_LOCKER')
1440 self._wlockref = weakref.ref(l)
1440 self._wlockref = weakref.ref(l)
1441 return l
1441 return l
1442
1442
1443 def _currentlock(self, lockref):
1443 def _currentlock(self, lockref):
1444 """Returns the lock if it's held, or None if it's not."""
1444 """Returns the lock if it's held, or None if it's not."""
1445 if lockref is None:
1445 if lockref is None:
1446 return None
1446 return None
1447 l = lockref()
1447 l = lockref()
1448 if l is None or not l.held:
1448 if l is None or not l.held:
1449 return None
1449 return None
1450 return l
1450 return l
1451
1451
1452 def currentwlock(self):
1452 def currentwlock(self):
1453 """Returns the wlock if it's held, or None if it's not."""
1453 """Returns the wlock if it's held, or None if it's not."""
1454 return self._currentlock(self._wlockref)
1454 return self._currentlock(self._wlockref)
1455
1455
1456 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context being committed
        manifest1: manifest of the first parent changeset
        manifest2: manifest of the second parent (empty when not a merge)
        linkrev: changelog revision the new filelog entry will link to
        tr: the transaction the filelog write participates in
        changelist: list of changed file names; this method appends fname
            to it whenever the file is recorded as changed

        Returns the filelog node to store in the new manifest for this
        file (either a freshly added revision or a reused parent node).
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # fctx already points at a stored filelog revision; reuse it
            # when it matches one of the parents (only the flags may
            # still differ, which is reflected in changelist).
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        For exact or prefix matches, every explicitly named file must be
        accounted for: part of the commit (modified/added/removed), a
        subrepo, or a visited directory containing a committed file.
        Any other explicit pattern is reported through fail(f, msg),
        which is expected to abort.
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    # for/else: fail only if no committed file lives
                    # under the named directory
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit (and empty commits are not allowed).
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit directory patterns
            # can be validated later by checkcommitpatterns()
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            # 'edited' is always bound here so the except clause below
            # can safely test it
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        # default arguments capture the current values so the hook sees
        # this commit even if another one happens before the lock is
        # released
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx: a context object (e.g. workingcommitctx) describing the
            revision to store
        error: when true, suppress the re-raise warning filter for
            IOError during file commits (see the except clause below)

        Returns the node of the new changelog revision.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weakref proxy avoids keeping the transaction alive via
            # the references handed to filelog/manifest writers
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        # a missing file (ENOENT) is only warned about
                        # when committing after an error
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1821 @unfilteredmethod
1821 @unfilteredmethod
1822 def destroying(self):
1822 def destroying(self):
1823 '''Inform the repository that nodes are about to be destroyed.
1823 '''Inform the repository that nodes are about to be destroyed.
1824 Intended for use by strip and rollback, so there's a common
1824 Intended for use by strip and rollback, so there's a common
1825 place for anything that has to be done before destroying history.
1825 place for anything that has to be done before destroying history.
1826
1826
1827 This is mostly useful for saving state that is in memory and waiting
1827 This is mostly useful for saving state that is in memory and waiting
1828 to be flushed when the current lock is released. Because a call to
1828 to be flushed when the current lock is released. Because a call to
1829 destroyed is imminent, the repo will be invalidated causing those
1829 destroyed is imminent, the repo will be invalidated causing those
1830 changes to stay in memory (waiting for the next unlock), or vanish
1830 changes to stay in memory (waiting for the next unlock), or vanish
1831 completely.
1831 completely.
1832 '''
1832 '''
1833 # When using the same lock to commit and strip, the phasecache is left
1833 # When using the same lock to commit and strip, the phasecache is left
1834 # dirty after committing. Then when we strip, the repo is invalidated,
1834 # dirty after committing. Then when we strip, the repo is invalidated,
1835 # causing those changes to disappear.
1835 # causing those changes to disappear.
1836 if '_phasecache' in vars(self):
1836 if '_phasecache' in vars(self):
1837 self._phasecache.write()
1837 self._phasecache.write()
1838
1838
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Order matters here: the phasecache is filtered and written before
        the repo is invalidated, otherwise the cleaned state would be
        discarded along with the rest of the in-memory caches.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1873 def walk(self, match, node=None):
1873 def walk(self, match, node=None):
1874 '''
1874 '''
1875 walk recursively through the directory tree or a given
1875 walk recursively through the directory tree or a given
1876 changeset, finding all files matched by the match
1876 changeset, finding all files matched by the match
1877 function
1877 function
1878 '''
1878 '''
1879 return self[node].walk(match)
1879 return self[node].walk(match)
1880
1880
1881 def status(self, node1='.', node2=None, match=None,
1881 def status(self, node1='.', node2=None, match=None,
1882 ignored=False, clean=False, unknown=False,
1882 ignored=False, clean=False, unknown=False,
1883 listsubrepos=False):
1883 listsubrepos=False):
1884 '''a convenience method that calls node1.status(node2)'''
1884 '''a convenience method that calls node1.status(node2)'''
1885 return self[node1].status(node2, match, ignored, clean, unknown,
1885 return self[node1].status(node2, match, ignored, clean, unknown,
1886 listsubrepos)
1886 listsubrepos)
1887
1887
1888 def heads(self, start=None):
1888 def heads(self, start=None):
1889 if start is None:
1889 if start is None:
1890 cl = self.changelog
1890 cl = self.changelog
1891 headrevs = reversed(cl.headrevs())
1891 headrevs = reversed(cl.headrevs())
1892 return [cl.node(rev) for rev in headrevs]
1892 return [cl.node(rev) for rev in headrevs]
1893
1893
1894 heads = self.changelog.heads(start)
1894 heads = self.changelog.heads(start)
1895 # sort the output in rev descending order
1895 # sort the output in rev descending order
1896 return sorted(heads, key=self.changelog.rev, reverse=True)
1896 return sorted(heads, key=self.changelog.rev, reverse=True)
1897
1897
1898 def branchheads(self, branch=None, start=None, closed=False):
1898 def branchheads(self, branch=None, start=None, closed=False):
1899 '''return a (possibly filtered) list of heads for the given branch
1899 '''return a (possibly filtered) list of heads for the given branch
1900
1900
1901 Heads are returned in topological order, from newest to oldest.
1901 Heads are returned in topological order, from newest to oldest.
1902 If branch is None, use the dirstate branch.
1902 If branch is None, use the dirstate branch.
1903 If start is not None, return only heads reachable from start.
1903 If start is not None, return only heads reachable from start.
1904 If closed is True, return heads that are marked as closed as well.
1904 If closed is True, return heads that are marked as closed as well.
1905 '''
1905 '''
1906 if branch is None:
1906 if branch is None:
1907 branch = self[None].branch()
1907 branch = self[None].branch()
1908 branches = self.branchmap()
1908 branches = self.branchmap()
1909 if branch not in branches:
1909 if branch not in branches:
1910 return []
1910 return []
1911 # the cache returns heads ordered lowest to highest
1911 # the cache returns heads ordered lowest to highest
1912 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1912 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1913 if start is not None:
1913 if start is not None:
1914 # filter out the heads that cannot be reached from startrev
1914 # filter out the heads that cannot be reached from startrev
1915 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1915 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1916 bheads = [h for h in bheads if h in fbheads]
1916 bheads = [h for h in bheads if h in fbheads]
1917 return bheads
1917 return bheads
1918
1918
1919 def branches(self, nodes):
1919 def branches(self, nodes):
1920 if not nodes:
1920 if not nodes:
1921 nodes = [self.changelog.tip()]
1921 nodes = [self.changelog.tip()]
1922 b = []
1922 b = []
1923 for n in nodes:
1923 for n in nodes:
1924 t = n
1924 t = n
1925 while True:
1925 while True:
1926 p = self.changelog.parents(n)
1926 p = self.changelog.parents(n)
1927 if p[1] != nullid or p[0] == nullid:
1927 if p[1] != nullid or p[0] == nullid:
1928 b.append((t, n, p[0], p[1]))
1928 b.append((t, n, p[0], p[1]))
1929 break
1929 break
1930 n = p[0]
1930 n = p[0]
1931 return b
1931 return b
1932
1932
1933 def between(self, pairs):
1933 def between(self, pairs):
1934 r = []
1934 r = []
1935
1935
1936 for top, bottom in pairs:
1936 for top, bottom in pairs:
1937 n, l, i = top, [], 0
1937 n, l, i = top, [], 0
1938 f = 1
1938 f = 1
1939
1939
1940 while n != bottom and n != nullid:
1940 while n != bottom and n != nullid:
1941 p = self.changelog.parents(n)[0]
1941 p = self.changelog.parents(n)[0]
1942 if i == f:
1942 if i == f:
1943 l.append(n)
1943 l.append(n)
1944 f = f * 2
1944 f = f * 2
1945 n = p
1945 n = p
1946 i += 1
1946 i += 1
1947
1947
1948 r.append(l)
1948 r.append(l)
1949
1949
1950 return r
1950 return r
1951
1951
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.

        The base implementation is intentionally a no-op.
        """
        pass
1959 @unfilteredpropertycache
1959 @unfilteredpropertycache
1960 def prepushoutgoinghooks(self):
1960 def prepushoutgoinghooks(self):
1961 """Return util.hooks consists of a pushop with repo, remote, outgoing
1961 """Return util.hooks consists of a pushop with repo, remote, outgoing
1962 methods, which are called before pushing changesets.
1962 methods, which are called before pushing changesets.
1963 """
1963 """
1964 return util.hooks()
1964 return util.hooks()
1965
1965
1966 def pushkey(self, namespace, key, old, new):
1966 def pushkey(self, namespace, key, old, new):
1967 try:
1967 try:
1968 tr = self.currenttransaction()
1968 tr = self.currenttransaction()
1969 hookargs = {}
1969 hookargs = {}
1970 if tr is not None:
1970 if tr is not None:
1971 hookargs.update(tr.hookargs)
1971 hookargs.update(tr.hookargs)
1972 hookargs['namespace'] = namespace
1972 hookargs['namespace'] = namespace
1973 hookargs['key'] = key
1973 hookargs['key'] = key
1974 hookargs['old'] = old
1974 hookargs['old'] = old
1975 hookargs['new'] = new
1975 hookargs['new'] = new
1976 self.hook('prepushkey', throw=True, **hookargs)
1976 self.hook('prepushkey', throw=True, **hookargs)
1977 except error.HookAbort as exc:
1977 except error.HookAbort as exc:
1978 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1978 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1979 if exc.hint:
1979 if exc.hint:
1980 self.ui.write_err(_("(%s)\n") % exc.hint)
1980 self.ui.write_err(_("(%s)\n") % exc.hint)
1981 return False
1981 return False
1982 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1982 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1983 ret = pushkey.push(self, namespace, key, old, new)
1983 ret = pushkey.push(self, namespace, key, old, new)
1984 def runhook():
1984 def runhook():
1985 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1985 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1986 ret=ret)
1986 ret=ret)
1987 self._afterlock(runhook)
1987 self._afterlock(runhook)
1988 return ret
1988 return ret
1989
1989
1990 def listkeys(self, namespace):
1990 def listkeys(self, namespace):
1991 self.hook('prelistkeys', throw=True, namespace=namespace)
1991 self.hook('prelistkeys', throw=True, namespace=namespace)
1992 self.ui.debug('listing keys for "%s"\n' % namespace)
1992 self.ui.debug('listing keys for "%s"\n' % namespace)
1993 values = pushkey.list(self, namespace)
1993 values = pushkey.list(self, namespace)
1994 self.hook('listkeys', namespace=namespace, values=values)
1994 self.hook('listkeys', namespace=namespace, values=values)
1995 return values
1995 return values
1996
1996
1997 def debugwireargs(self, one, two, three=None, four=None, five=None):
1997 def debugwireargs(self, one, two, three=None, four=None, five=None):
1998 '''used to test argument passing over the wire'''
1998 '''used to test argument passing over the wire'''
1999 return "%s %s %s %s %s" % (one, two, three, four, five)
1999 return "%s %s %s %s %s" % (one, two, three, four, five)
2000
2000
2001 def savecommitmessage(self, text):
2001 def savecommitmessage(self, text):
2002 fp = self.vfs('last-message.txt', 'wb')
2002 fp = self.vfs('last-message.txt', 'wb')
2003 try:
2003 try:
2004 fp.write(text)
2004 fp.write(text)
2005 finally:
2005 finally:
2006 fp.close()
2006 fp.close()
2007 return self.pathto(fp.name[len(self.root) + 1:])
2007 return self.pathto(fp.name[len(self.root) + 1:])
2008
2008
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the (vfs, src, dest) renames in *files*.

    The entries are copied into plain tuples so the closure holds no
    reference back to the caller's structures (avoiding reference cycles
    that would keep destructors from running).
    """
    renames = [tuple(entry) for entry in files]
    def a():
        for vfs, src, dest in renames:
            # when src and dest refer to the same file, vfs.rename is a
            # no-op that would leave both names on disk; remove dest
            # first so the rename can never silently do nothing
            try:
                vfs.unlink(dest)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return a
2027
2027
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    dirname, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(dirname, basename.replace('journal', 'undo', 1))
2032
2032
def instance(ui, path, create):
    """Open (or create) the local repository addressed by *path*."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2035
2035
def islocal(path):
    """A local repository is, by definition, always local."""
    return True
2038
2038
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    reqs = set(['revlogv1'])

    # simple boolean format knobs that each map to one requirement
    for section, option, requirement in [('format', 'usestore', 'store'),
                                         ('format', 'usefncache', 'fncache'),
                                         ('format', 'dotencode', 'dotencode')]:
        if ui.configbool(section, option, True):
            reqs.add(requirement)

    engine = ui.config('experimental', 'format.compression', 'zlib')
    if engine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          engine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if engine != 'zlib':
        reqs.add('exp-compression-%s' % engine)

    if scmutil.gdinitconfig(ui):
        reqs.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        reqs.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        reqs.add('manifestv2')

    return reqs
General Comments 0
You need to be logged in to leave comments. Login now