##// END OF EJS Templates
localrepo: don't use mutable default argument value...
Pierre-Yves David -
r31412:ecc87acb default
parent child Browse files
Show More
@@ -1,2076 +1,2078 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 store,
56 store,
57 subrepo,
57 subrepo,
58 tags as tagsmod,
58 tags as tagsmod,
59 transaction,
59 transaction,
60 txnutil,
60 txnutil,
61 util,
61 util,
62 vfs as vfsmod,
62 vfs as vfsmod,
63 )
63 )
64
64
# Module-level convenience aliases; kept for callers that import them
# from this module rather than from their home modules.
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
68
68
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def join(self, obj, fname):
        # Resolve the cached file name against the repository's .hg vfs.
        return obj.vfs.join(fname)

    def __get__(self, repo, type=None):
        # Class-level access returns the descriptor itself.
        if repo is None:
            return self
        # Always read the cached value from the unfiltered repo so all
        # filtered views share a single cache entry.
        return super(repofilecache, self).__get__(repo.unfiltered(), type)

    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)

    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
83
83
class storecache(repofilecache):
    """filecache for files in the store"""

    def join(self, obj, fname):
        # Store files live under .hg/store; use the store-aware join.
        return obj.sjoin(fname)
88
88
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Already unfiltered: let propertycache compute and memoize.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Filtered view: delegate to the attribute on the unfiltered repo
        # so the value is computed and cached there only.
        return getattr(unfi, self.name)
97
97
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Store directly on the instance (bypassing descriptor machinery)
        # so each filtered view keeps its own cached value.
        object.__setattr__(obj, self.name, value)
103
103
104
104
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    # A cached value shows up in the instance __dict__ of the unfiltered repo.
    return name in vars(repo.unfiltered())
108
108
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # Swap the (possibly filtered) repo for its unfiltered variant
        # before dispatching to the original implementation.
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
114
114
# Capabilities advertised by a modern local peer.
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
# Legacy peers additionally expose the old changegroupsubset command.
legacycaps = moderncaps | {'changegroupsubset'}
118
118
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        # Never use a module-level set as a default argument value: the
        # same object would be shared by every instance. Copy the default
        # per instance instead.
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
225
227
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Advertise the legacy capability set instead of the modern default.
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
244
246
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the repoview filter applied to this repo (None = unfiltered).
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
257
259
258 def __init__(self, baseui, path, create=False):
260 def __init__(self, baseui, path, create=False):
259 self.requirements = set()
261 self.requirements = set()
260 # vfs to access the working copy
262 # vfs to access the working copy
261 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
263 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
262 # vfs to access the content of the repository
264 # vfs to access the content of the repository
263 self.vfs = None
265 self.vfs = None
264 # vfs to access the store part of the repository
266 # vfs to access the store part of the repository
265 self.svfs = None
267 self.svfs = None
266 self.root = self.wvfs.base
268 self.root = self.wvfs.base
267 self.path = self.wvfs.join(".hg")
269 self.path = self.wvfs.join(".hg")
268 self.origroot = path
270 self.origroot = path
269 self.auditor = pathutil.pathauditor(self.root, self._checknested)
271 self.auditor = pathutil.pathauditor(self.root, self._checknested)
270 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
272 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
271 realfs=False)
273 realfs=False)
272 self.vfs = vfsmod.vfs(self.path)
274 self.vfs = vfsmod.vfs(self.path)
273 self.baseui = baseui
275 self.baseui = baseui
274 self.ui = baseui.copy()
276 self.ui = baseui.copy()
275 self.ui.copy = baseui.copy # prevent copying repo configuration
277 self.ui.copy = baseui.copy # prevent copying repo configuration
276 # A list of callback to shape the phase if no data were found.
278 # A list of callback to shape the phase if no data were found.
277 # Callback are in the form: func(repo, roots) --> processed root.
279 # Callback are in the form: func(repo, roots) --> processed root.
278 # This list it to be filled by extension during repo setup
280 # This list it to be filled by extension during repo setup
279 self._phasedefaults = []
281 self._phasedefaults = []
280 try:
282 try:
281 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
283 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
282 self._loadextensions()
284 self._loadextensions()
283 except IOError:
285 except IOError:
284 pass
286 pass
285
287
286 if self.featuresetupfuncs:
288 if self.featuresetupfuncs:
287 self.supported = set(self._basesupported) # use private copy
289 self.supported = set(self._basesupported) # use private copy
288 extmods = set(m.__name__ for n, m
290 extmods = set(m.__name__ for n, m
289 in extensions.extensions(self.ui))
291 in extensions.extensions(self.ui))
290 for setupfunc in self.featuresetupfuncs:
292 for setupfunc in self.featuresetupfuncs:
291 if setupfunc.__module__ in extmods:
293 if setupfunc.__module__ in extmods:
292 setupfunc(self.ui, self.supported)
294 setupfunc(self.ui, self.supported)
293 else:
295 else:
294 self.supported = self._basesupported
296 self.supported = self._basesupported
295 color.setup(self.ui)
297 color.setup(self.ui)
296
298
297 # Add compression engines.
299 # Add compression engines.
298 for name in util.compengines:
300 for name in util.compengines:
299 engine = util.compengines[name]
301 engine = util.compengines[name]
300 if engine.revlogheader():
302 if engine.revlogheader():
301 self.supported.add('exp-compression-%s' % name)
303 self.supported.add('exp-compression-%s' % name)
302
304
303 if not self.vfs.isdir():
305 if not self.vfs.isdir():
304 if create:
306 if create:
305 self.requirements = newreporequirements(self)
307 self.requirements = newreporequirements(self)
306
308
307 if not self.wvfs.exists():
309 if not self.wvfs.exists():
308 self.wvfs.makedirs()
310 self.wvfs.makedirs()
309 self.vfs.makedir(notindexed=True)
311 self.vfs.makedir(notindexed=True)
310
312
311 if 'store' in self.requirements:
313 if 'store' in self.requirements:
312 self.vfs.mkdir("store")
314 self.vfs.mkdir("store")
313
315
314 # create an invalid changelog
316 # create an invalid changelog
315 self.vfs.append(
317 self.vfs.append(
316 "00changelog.i",
318 "00changelog.i",
317 '\0\0\0\2' # represents revlogv2
319 '\0\0\0\2' # represents revlogv2
318 ' dummy changelog to prevent using the old repo layout'
320 ' dummy changelog to prevent using the old repo layout'
319 )
321 )
320 else:
322 else:
321 raise error.RepoError(_("repository %s not found") % path)
323 raise error.RepoError(_("repository %s not found") % path)
322 elif create:
324 elif create:
323 raise error.RepoError(_("repository %s already exists") % path)
325 raise error.RepoError(_("repository %s already exists") % path)
324 else:
326 else:
325 try:
327 try:
326 self.requirements = scmutil.readrequires(
328 self.requirements = scmutil.readrequires(
327 self.vfs, self.supported)
329 self.vfs, self.supported)
328 except IOError as inst:
330 except IOError as inst:
329 if inst.errno != errno.ENOENT:
331 if inst.errno != errno.ENOENT:
330 raise
332 raise
331
333
332 self.sharedpath = self.path
334 self.sharedpath = self.path
333 try:
335 try:
334 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
336 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
335 if 'relshared' in self.requirements:
337 if 'relshared' in self.requirements:
336 sharedpath = self.vfs.join(sharedpath)
338 sharedpath = self.vfs.join(sharedpath)
337 vfs = vfsmod.vfs(sharedpath, realpath=True)
339 vfs = vfsmod.vfs(sharedpath, realpath=True)
338 s = vfs.base
340 s = vfs.base
339 if not vfs.exists():
341 if not vfs.exists():
340 raise error.RepoError(
342 raise error.RepoError(
341 _('.hg/sharedpath points to nonexistent directory %s') % s)
343 _('.hg/sharedpath points to nonexistent directory %s') % s)
342 self.sharedpath = s
344 self.sharedpath = s
343 except IOError as inst:
345 except IOError as inst:
344 if inst.errno != errno.ENOENT:
346 if inst.errno != errno.ENOENT:
345 raise
347 raise
346
348
347 self.store = store.store(
349 self.store = store.store(
348 self.requirements, self.sharedpath, vfsmod.vfs)
350 self.requirements, self.sharedpath, vfsmod.vfs)
349 self.spath = self.store.path
351 self.spath = self.store.path
350 self.svfs = self.store.vfs
352 self.svfs = self.store.vfs
351 self.sjoin = self.store.join
353 self.sjoin = self.store.join
352 self.vfs.createmode = self.store.createmode
354 self.vfs.createmode = self.store.createmode
353 self._applyopenerreqs()
355 self._applyopenerreqs()
354 if create:
356 if create:
355 self._writerequirements()
357 self._writerequirements()
356
358
357 self._dirstatevalidatewarned = False
359 self._dirstatevalidatewarned = False
358
360
359 self._branchcaches = {}
361 self._branchcaches = {}
360 self._revbranchcache = None
362 self._revbranchcache = None
361 self.filterpats = {}
363 self.filterpats = {}
362 self._datafilters = {}
364 self._datafilters = {}
363 self._transref = self._lockref = self._wlockref = None
365 self._transref = self._lockref = self._wlockref = None
364
366
365 # A cache for various files under .hg/ that tracks file changes,
367 # A cache for various files under .hg/ that tracks file changes,
366 # (used by the filecache decorator)
368 # (used by the filecache decorator)
367 #
369 #
368 # Maps a property name to its util.filecacheentry
370 # Maps a property name to its util.filecacheentry
369 self._filecache = {}
371 self._filecache = {}
370
372
371 # hold sets of revision to be filtered
373 # hold sets of revision to be filtered
372 # should be cleared when something might have changed the filter value:
374 # should be cleared when something might have changed the filter value:
373 # - new changesets,
375 # - new changesets,
374 # - phase change,
376 # - phase change,
375 # - new obsolescence marker,
377 # - new obsolescence marker,
376 # - working directory parent change,
378 # - working directory parent change,
377 # - bookmark changes
379 # - bookmark changes
378 self.filteredrevcache = {}
380 self.filteredrevcache = {}
379
381
380 # generic mapping between names and nodes
382 # generic mapping between names and nodes
381 self.names = namespaces.namespaces()
383 self.names = namespaces.namespaces()
382
384
383 @property
385 @property
384 def wopener(self):
386 def wopener(self):
385 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
387 self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
386 return self.wvfs
388 return self.wvfs
387
389
388 @property
390 @property
389 def opener(self):
391 def opener(self):
390 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
392 self.ui.deprecwarn("use 'repo.vfs' instead of 'repo.opener'", '4.2')
391 return self.vfs
393 return self.vfs
392
394
393 def close(self):
395 def close(self):
394 self._writecaches()
396 self._writecaches()
395
397
396 def _loadextensions(self):
398 def _loadextensions(self):
397 extensions.loadall(self.ui)
399 extensions.loadall(self.ui)
398
400
399 def _writecaches(self):
401 def _writecaches(self):
400 if self._revbranchcache:
402 if self._revbranchcache:
401 self._revbranchcache.write()
403 self._revbranchcache.write()
402
404
403 def _restrictcapabilities(self, caps):
405 def _restrictcapabilities(self, caps):
404 if self.ui.configbool('experimental', 'bundle2-advertise', True):
406 if self.ui.configbool('experimental', 'bundle2-advertise', True):
405 caps = set(caps)
407 caps = set(caps)
406 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
408 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
407 caps.add('bundle2=' + urlreq.quote(capsblob))
409 caps.add('bundle2=' + urlreq.quote(capsblob))
408 return caps
410 return caps
409
411
410 def _applyopenerreqs(self):
412 def _applyopenerreqs(self):
411 self.svfs.options = dict((r, 1) for r in self.requirements
413 self.svfs.options = dict((r, 1) for r in self.requirements
412 if r in self.openerreqs)
414 if r in self.openerreqs)
413 # experimental config: format.chunkcachesize
415 # experimental config: format.chunkcachesize
414 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
416 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
415 if chunkcachesize is not None:
417 if chunkcachesize is not None:
416 self.svfs.options['chunkcachesize'] = chunkcachesize
418 self.svfs.options['chunkcachesize'] = chunkcachesize
417 # experimental config: format.maxchainlen
419 # experimental config: format.maxchainlen
418 maxchainlen = self.ui.configint('format', 'maxchainlen')
420 maxchainlen = self.ui.configint('format', 'maxchainlen')
419 if maxchainlen is not None:
421 if maxchainlen is not None:
420 self.svfs.options['maxchainlen'] = maxchainlen
422 self.svfs.options['maxchainlen'] = maxchainlen
421 # experimental config: format.manifestcachesize
423 # experimental config: format.manifestcachesize
422 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
424 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
423 if manifestcachesize is not None:
425 if manifestcachesize is not None:
424 self.svfs.options['manifestcachesize'] = manifestcachesize
426 self.svfs.options['manifestcachesize'] = manifestcachesize
425 # experimental config: format.aggressivemergedeltas
427 # experimental config: format.aggressivemergedeltas
426 aggressivemergedeltas = self.ui.configbool('format',
428 aggressivemergedeltas = self.ui.configbool('format',
427 'aggressivemergedeltas', False)
429 'aggressivemergedeltas', False)
428 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
430 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
429 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
431 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
430
432
431 for r in self.requirements:
433 for r in self.requirements:
432 if r.startswith('exp-compression-'):
434 if r.startswith('exp-compression-'):
433 self.svfs.options['compengine'] = r[len('exp-compression-'):]
435 self.svfs.options['compengine'] = r[len('exp-compression-'):]
434
436
435 def _writerequirements(self):
437 def _writerequirements(self):
436 scmutil.writerequires(self.vfs, self.requirements)
438 scmutil.writerequires(self.vfs, self.requirements)
437
439
438 def _checknested(self, path):
440 def _checknested(self, path):
439 """Determine if path is a legal nested repository."""
441 """Determine if path is a legal nested repository."""
440 if not path.startswith(self.root):
442 if not path.startswith(self.root):
441 return False
443 return False
442 subpath = path[len(self.root) + 1:]
444 subpath = path[len(self.root) + 1:]
443 normsubpath = util.pconvert(subpath)
445 normsubpath = util.pconvert(subpath)
444
446
445 # XXX: Checking against the current working copy is wrong in
447 # XXX: Checking against the current working copy is wrong in
446 # the sense that it can reject things like
448 # the sense that it can reject things like
447 #
449 #
448 # $ hg cat -r 10 sub/x.txt
450 # $ hg cat -r 10 sub/x.txt
449 #
451 #
450 # if sub/ is no longer a subrepository in the working copy
452 # if sub/ is no longer a subrepository in the working copy
451 # parent revision.
453 # parent revision.
452 #
454 #
453 # However, it can of course also allow things that would have
455 # However, it can of course also allow things that would have
454 # been rejected before, such as the above cat command if sub/
456 # been rejected before, such as the above cat command if sub/
455 # is a subrepository now, but was a normal directory before.
457 # is a subrepository now, but was a normal directory before.
456 # The old path auditor would have rejected by mistake since it
458 # The old path auditor would have rejected by mistake since it
457 # panics when it sees sub/.hg/.
459 # panics when it sees sub/.hg/.
458 #
460 #
459 # All in all, checking against the working copy seems sensible
461 # All in all, checking against the working copy seems sensible
460 # since we want to prevent access to nested repositories on
462 # since we want to prevent access to nested repositories on
461 # the filesystem *now*.
463 # the filesystem *now*.
462 ctx = self[None]
464 ctx = self[None]
463 parts = util.splitpath(subpath)
465 parts = util.splitpath(subpath)
464 while parts:
466 while parts:
465 prefix = '/'.join(parts)
467 prefix = '/'.join(parts)
466 if prefix in ctx.substate:
468 if prefix in ctx.substate:
467 if prefix == normsubpath:
469 if prefix == normsubpath:
468 return True
470 return True
469 else:
471 else:
470 sub = ctx.sub(prefix)
472 sub = ctx.sub(prefix)
471 return sub.checknested(subpath[len(prefix) + 1:])
473 return sub.checknested(subpath[len(prefix) + 1:])
472 else:
474 else:
473 parts.pop()
475 parts.pop()
474 return False
476 return False
475
477
476 def peer(self):
478 def peer(self):
477 return localpeer(self) # not cached to avoid reference cycle
479 return localpeer(self) # not cached to avoid reference cycle
478
480
479 def unfiltered(self):
481 def unfiltered(self):
480 """Return unfiltered version of the repository
482 """Return unfiltered version of the repository
481
483
482 Intended to be overwritten by filtered repo."""
484 Intended to be overwritten by filtered repo."""
483 return self
485 return self
484
486
485 def filtered(self, name):
487 def filtered(self, name):
486 """Return a filtered version of a repository"""
488 """Return a filtered version of a repository"""
487 # build a new class with the mixin and the current class
489 # build a new class with the mixin and the current class
488 # (possibly subclass of the repo)
490 # (possibly subclass of the repo)
489 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
491 class filteredrepo(repoview.repoview, self.unfiltered().__class__):
490 pass
492 pass
491 return filteredrepo(self, name)
493 return filteredrepo(self, name)
492
494
493 @repofilecache('bookmarks', 'bookmarks.current')
495 @repofilecache('bookmarks', 'bookmarks.current')
494 def _bookmarks(self):
496 def _bookmarks(self):
495 return bookmarks.bmstore(self)
497 return bookmarks.bmstore(self)
496
498
497 @property
499 @property
498 def _activebookmark(self):
500 def _activebookmark(self):
499 return self._bookmarks.active
501 return self._bookmarks.active
500
502
501 def bookmarkheads(self, bookmark):
503 def bookmarkheads(self, bookmark):
502 name = bookmark.split('@', 1)[0]
504 name = bookmark.split('@', 1)[0]
503 heads = []
505 heads = []
504 for mark, n in self._bookmarks.iteritems():
506 for mark, n in self._bookmarks.iteritems():
505 if mark.split('@', 1)[0] == name:
507 if mark.split('@', 1)[0] == name:
506 heads.append(n)
508 heads.append(n)
507 return heads
509 return heads
508
510
# _phaserevs and _phasesets depend on changelog. what we need is to
# call _phasecache.invalidate() if '00changelog.i' was changed, but it
# can't be easily expressed in filecache mechanism.
@storecache('phaseroots', '00changelog.i')
def _phasecache(self):
    """Phase-tracking cache, refreshed when phaseroots/changelog change."""
    return phases.phasecache(self, self._phasedefaults)
515
517
@storecache('obsstore')
def obsstore(self):
    """The obsolescence-marker store.

    Opened read-only unless marker creation is enabled; emits a warning
    when markers exist while the feature is disabled.
    """
    # read default format for new obsstore.
    # developer config: format.obsstore-version
    defaultformat = self.ui.configint('format', 'obsstore-version', None)
    # rely on obsstore class default when possible.
    kwargs = {}
    if defaultformat is not None:
        kwargs['defaultformat'] = defaultformat
    readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
    store = obsolete.obsstore(self.svfs, readonly=readonly, **kwargs)
    if store and readonly:
        self.ui.warn(
            _('obsolete feature not enabled but %i markers found!\n')
            % len(list(store)))
    return store
533
535
@storecache('00changelog.i')
def changelog(self):
    """The changelog revlog, including pending data when a transaction
    may be in flight."""
    cl = changelog.changelog(self.svfs)
    if txnutil.mayhavepending(self.root):
        cl.readpending('00changelog.i.a')
    return cl
540
542
def _constructmanifest(self):
    """Build the raw manifest revlog.

    Temporary hook while migrating from manifest to manifestlog; it
    lets bundlerepo and unionrepo intercept manifest creation.
    """
    return manifest.manifestrevlog(self.svfs)
546
548
@storecache('00manifest.i')
def manifestlog(self):
    """The manifest log, refreshed when 00manifest.i changes."""
    return manifest.manifestlog(self.svfs, self)
550
552
@repofilecache('dirstate')
def dirstate(self):
    """Working-directory state, with parents validated by
    _dirstatevalidate."""
    return dirstate.dirstate(self.vfs, self.ui, self.root,
                             self._dirstatevalidate)
555
557
def _dirstatevalidate(self, node):
    """Validate a dirstate parent *node* against the changelog.

    An unknown working parent is reported once and replaced by nullid
    rather than aborting.
    """
    try:
        self.changelog.rev(node)
    except error.LookupError:
        if not self._dirstatevalidatewarned:
            self._dirstatevalidatewarned = True
            self.ui.warn(_("warning: ignoring unknown"
                           " working parent %s!\n") % short(node))
        return nullid
    else:
        return node
566
568
def __getitem__(self, changeid):
    """Return the changectx for *changeid* (workingctx for None/wdirrev).

    A slice yields a list of changectxs for the unfiltered revisions in
    its range.
    """
    if changeid is None or changeid == wdirrev:
        return context.workingctx(self)
    if isinstance(changeid, slice):
        hidden = self.changelog.filteredrevs
        return [context.changectx(self, rev)
                for rev in xrange(*changeid.indices(len(self)))
                if rev not in hidden]
    return context.changectx(self, changeid)
575
577
def __contains__(self, changeid):
    """True if *changeid* resolves to a changeset in this repository."""
    try:
        self[changeid]
    except error.RepoLookupError:
        return False
    return True
582
584
def __nonzero__(self):
    # a repository object is always truthy, even when empty
    return True
585
587
def __len__(self):
    """Number of revisions in the (possibly filtered) changelog."""
    return len(self.changelog)
588
590
def __iter__(self):
    """Iterate over revision numbers in the changelog."""
    return iter(self.changelog)
591
593
def revs(self, expr, *args):
    '''Find revisions matching a revset.

    The revset is specified as a string ``expr`` that may contain
    %-formatting to escape certain types. See ``revsetlang.formatspec``.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)``.

    Returns a revset.abstractsmartset, which is a list-like interface
    that contains integer revisions.
    '''
    spec = revsetlang.formatspec(expr, *args)
    matcher = revset.match(None, spec)
    return matcher(self)
608
610
def set(self, expr, *args):
    '''Find revisions matching a revset and emit changectx instances.

    This is a convenience wrapper around ``revs()`` that iterates the
    result and is a generator of changectx instances.

    Revset aliases from the configuration are not expanded. To expand
    user aliases, consider calling ``scmutil.revrange()``.
    '''
    # NOTE: the method name shadows the builtin 'set' inside this class,
    # but it is part of the public repo interface and cannot be renamed.
    for rev in self.revs(expr, *args):
        yield self[rev]
620
622
def anyrevs(self, specs, user=False):
    '''Find revisions matching one of the given revsets.

    Revset aliases from the configuration are not expanded by default.
    To expand user aliases, specify ``user=True``.
    '''
    if user:
        matcher = revset.matchany(self.ui, specs, repo=self)
    else:
        matcher = revset.matchany(None, specs)
    return matcher(self)
632
634
def url(self):
    """URL of this repository: a 'file:' scheme plus the root path."""
    return 'file:' + self.root
635
637
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This a convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.
    """
    return hook.hook(self.ui, self, name, throw, **args)
644
646
@unfilteredmethod
def _tag(self, names, node, message, local, user, date, extra=None,
         editor=False):
    """Low-level tagging: write *names* for *node* and (for global tags)
    commit the .hgtags change.

    Local tags go to .hg/localtags in the local charset; global tags go
    to .hgtags in UTF-8 and a commit is created.  Fires the 'pretag'
    and 'tag' hooks.  Returns the node of the tag commit, or None for
    local tags.
    """
    if isinstance(names, str):
        names = (names,)

    branches = self.branchmap()
    for name in names:
        self.hook('pretag', throw=True, node=hex(node), tag=name,
                  local=local)
        if name in branches:
            self.ui.warn(_("warning: tag %s conflicts with existing"
                           " branch name\n") % name)

    def writetags(fp, names, munge, prevtags):
        # append to the tag file, making sure the previous content ends
        # with a newline before adding ours
        fp.seek(0, 2)
        if prevtags and prevtags[-1] != '\n':
            fp.write('\n')
        for name in names:
            entry = munge(name) if munge else name
            if (self._tagscache.tagtypes and
                name in self._tagscache.tagtypes):
                # record the previous target first so the tag history
                # stays reconstructible
                old = self.tags().get(name, nullid)
                fp.write('%s %s\n' % (hex(old), entry))
            fp.write('%s %s\n' % (hex(node), entry))
        fp.close()

    prevtags = ''
    if local:
        try:
            fp = self.vfs('localtags', 'r+')
        except IOError:
            fp = self.vfs('localtags', 'a')
        else:
            prevtags = fp.read()

        # local tags are stored in the current charset
        writetags(fp, names, None, prevtags)
        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)
        return

    try:
        fp = self.wfile('.hgtags', 'rb+')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        fp = self.wfile('.hgtags', 'ab')
    else:
        prevtags = fp.read()

    # committed tags are stored in UTF-8
    writetags(fp, names, encoding.fromlocal, prevtags)

    fp.close()

    self.invalidatecaches()

    if '.hgtags' not in self.dirstate:
        self[None].add(['.hgtags'])

    m = matchmod.exact(self.root, '', ['.hgtags'])
    tagnode = self.commit(message, user, date, extra=extra, match=m,
                          editor=editor)

    for name in names:
        self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
718
720
def tag(self, names, node, message, local, user, date, editor=False):
    '''tag a revision with one or more symbolic names.

    names is a list of strings or, when adding a single tag, names may be a
    string.

    if local is True, the tags are stored in a per-repository file.
    otherwise, they are stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tags in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    if not local:
        m = matchmod.exact(self.root, '', ['.hgtags'])
        # refuse to tag when .hgtags has uncommitted modifications
        if any(self.status(match=m, unknown=True, ignored=True)):
            raise error.Abort(_('working copy of .hgtags is changed'),
                              hint=_('please commit .hgtags manually'))

    self.tags() # instantiate the cache
    self._tag(names, node, message, local, user, date, editor=editor)
748
750
@filteredpropertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # This simplifies its cache management by having one decorated
    # function (this one) and the rest simply fetch things from it.
    class tagscache(object):
        def __init__(self):
            # These two define the set of tags for this repository. tags
            # maps tag name to node; tagtypes maps tag name to 'global'
            # or 'local'. (Global tags are defined by .hgtags across all
            # heads, and local tags are defined in .hg/localtags.)
            # They constitute the in-memory cache of tags.
            self.tags = self.tagtypes = None
            # lazily-populated derived views
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()
    return cache
771
773
def tags(self):
    '''return a mapping of tag to node'''
    if self.changelog.filteredrevs:
        # a filtered view may hide nodes; recompute instead of trusting
        # the unfiltered cache
        tags, tt = self._findtags()
    else:
        tags = self._tagscache.tags
    result = {}
    for name, node in tags.iteritems():
        try:
            # ignore tags to unknown nodes
            self.changelog.rev(node)
        except (error.LookupError, ValueError):
            continue
        result[name] = node
    return result
787
789
def _findtags(self):
    '''Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?

    alltags = {} # map tag name to (node, hist)
    tagtypes = {}

    tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for name, (node, hist) in alltags.iteritems():
        if node != nullid:
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict((encoding.tolocal(name), value)
                    for name, value in tagtypes.iteritems())
    return (tags, tagtypes)
820
822
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local' : a local tag
    'global' : a global tag
    None : tag does not exist
    '''
    return self._tagscache.tagtypes.get(tagname)
831
833
def tagslist(self):
    '''return a list of tags ordered by revision'''
    if not self._tagscache.tagslist:
        # sort (rev, tag, node) triples, then drop the rev key
        bysortkey = sorted((self.changelog.rev(n), t, n)
                           for t, n in self.tags().iteritems())
        self._tagscache.tagslist = [(t, n) for r, t, n in bysortkey]
    return self._tagscache.tagslist
841
843
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self._tagscache.nodetagscache:
        byned = {}
        for tagname, tagged in self._tagscache.tags.iteritems():
            byned.setdefault(tagged, []).append(tagname)
        for taglist in byned.values():
            taglist.sort()
        self._tagscache.nodetagscache = byned
    return self._tagscache.nodetagscache.get(node, [])
852
854
def nodebookmarks(self, node):
    """return the list of bookmarks pointing to the specified node"""
    return sorted(mark for mark, target in self._bookmarks.iteritems()
                  if target == node)
860
862
def branchmap(self):
    '''returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number'''
    branchmap.updatecache(self)
    return self._branchcaches[self.filtername]
866
868
@unfilteredmethod
def revbranchcache(self):
    """Return the rev-to-branch cache, creating it lazily."""
    if not self._revbranchcache:
        self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
    return self._revbranchcache
872
874
def branchtip(self, branch, ignoremissing=False):
    '''return the tip node for a given branch

    If ignoremissing is True, then this method will not raise an error.
    This is helpful for callers that only expect None for a missing branch
    (e.g. namespace).

    '''
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if not ignoremissing:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        # fall through: implicitly return None for a missing branch
888
890
def lookup(self, key):
    """Resolve *key* to a changeset node."""
    return self[key].node()
891
893
def lookupbranch(self, key, remote=None):
    """Return the branch named *key*, or the branch of revision *key*.

    When *remote* is given it is consulted first; the revision lookup
    only uses the remote when it is a local repository.
    """
    repo = remote or self
    if key in repo.branchmap():
        return key

    repo = remote if (remote and remote.local()) else self
    return repo[key].branch()
899
901
def known(self, nodes):
    """Map each node to True when it is known and not filtered out."""
    cl = self.changelog
    nm = cl.nodemap
    hidden = cl.filteredrevs
    result = []
    for node in nodes:
        rev = nm.get(node)
        result.append(rev is not None and rev not in hidden)
    return result
910
912
def local(self):
    """A local repository reports itself as its own local peer."""
    return self
913
915
def publishing(self):
    """True when this repository publishes changesets.

    The publish flag is trusted unconditionally (even untrusted config)
    so changes shared between users via ssh or nfs are not finalized
    prematurely.
    """
    return self.ui.configbool('phases', 'publish', True, untrusted=True)
918
920
def cancopy(self):
    """Whether the repo can safely be copied by file-level cloning."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    if not self.publishing():
        return True
    # if publishing we can't copy if there is filtered content
    return not self.filtered('visible').changelog.filteredrevs
927
929
def shared(self):
    '''the type of shared repository (None if not shared)'''
    if self.sharedpath != self.path:
        return 'store'
    return None
933
935
def join(self, f, *insidef):
    """Deprecated: join *f* (and components) under the .hg directory."""
    self.ui.deprecwarn("use 'repo.vfs.join' instead of 'repo.join'", '4.0')
    return self.vfs.join(os.path.join(f, *insidef))
937
939
def wjoin(self, f, *insidef):
    """Join *f* (and components) under the working directory root."""
    return self.vfs.reljoin(self.root, f, *insidef)
940
942
def file(self, f):
    """Return the filelog for tracked path *f* (leading '/' stripped)."""
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.svfs, f)
945
947
def changectx(self, changeid):
    """Return the changectx for *changeid* (delegates to __getitem__)."""
    return self[changeid]
948
950
def setparents(self, p1, p2=nullid):
    """Set the working directory parents to *p1* and *p2*.

    Copy records returned by the dirstate are adjusted here because the
    dirstate itself has no access to the parents' manifests.
    """
    self.dirstate.beginparentchange()
    copies = self.dirstate.setparents(p1, p2)
    pctx = self[p1]
    if copies:
        # Adjust copy records, the dirstate cannot do it, it
        # requires access to parents manifests. Preserve them
        # only for entries added to first parent.
        for f in copies:
            if f not in pctx and copies[f] in pctx:
                self.dirstate.copy(copies[f], f)
    if p2 == nullid:
        # drop copy records that reference neither side of the merge
        for f, s in sorted(self.dirstate.copies().items()):
            if f not in pctx and s not in pctx:
                self.dirstate.copy(None, f)
    self.dirstate.endparentchange()
965
967
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
970
972
def getcwd(self):
    """Current working directory as tracked by the dirstate."""
    return self.dirstate.getcwd()
973
975
def pathto(self, f, cwd=None):
    """Return *f* relative to *cwd* (delegates to the dirstate)."""
    return self.dirstate.pathto(f, cwd)
976
978
def wfile(self, f, mode='r'):
    """Open *f* from the working directory via the working vfs."""
    return self.wvfs(f, mode)
979
981
def _link(self, f):
    """True when working-directory file *f* is a symlink."""
    return self.wvfs.islink(f)
982
984
def _loadfilter(self, filter):
    """Return the (matcher, filterfn, params) list for config section
    *filter* ('encode'/'decode'), loading and caching it on first use.

    Each configured pattern maps to a registered data filter, or falls
    back to a shell filter run through util.filter.
    """
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments.
            # Bind the wrapped function as a default argument: a plain
            # closure over 'oldfn' would be rebound on the next loop
            # iteration, making every wrapper call the last old-style
            # filter instead of its own.
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l
    return self.filterpats[filter]
1006
1008
def _filter(self, filterpats, filename, data):
    """Run *data* through the first filter whose pattern matches
    *filename*; return the (possibly transformed) data."""
    for mf, fn, cmd in filterpats:
        if not mf(filename):
            continue
        self.ui.debug("filtering %s through %s\n" % (filename, cmd))
        data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
        break
    return data
1015
1017
@unfilteredpropertycache
def _encodefilterpats(self):
    """Cached filter patterns from the [encode] config section."""
    return self._loadfilter('encode')
1019
1021
@unfilteredpropertycache
def _decodefilterpats(self):
    """Cached filter patterns from the [decode] config section."""
    return self._loadfilter('decode')
1023
1025
def adddatafilter(self, name, filter):
    """Register data filter *filter* under *name* for encode/decode."""
    self._datafilters[name] = filter
1026
1028
def wread(self, filename):
    """Read *filename* from the working directory, applying encode
    filters; symlinks yield their target."""
    if self._link(filename):
        data = self.wvfs.readlink(filename)
    else:
        data = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, data)
1033
1035
1034 def wwrite(self, filename, data, flags, backgroundclose=False):
1036 def wwrite(self, filename, data, flags, backgroundclose=False):
1035 """write ``data`` into ``filename`` in the working directory
1037 """write ``data`` into ``filename`` in the working directory
1036
1038
1037 This returns length of written (maybe decoded) data.
1039 This returns length of written (maybe decoded) data.
1038 """
1040 """
1039 data = self._filter(self._decodefilterpats, filename, data)
1041 data = self._filter(self._decodefilterpats, filename, data)
1040 if 'l' in flags:
1042 if 'l' in flags:
1041 self.wvfs.symlink(data, filename)
1043 self.wvfs.symlink(data, filename)
1042 else:
1044 else:
1043 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1045 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1044 if 'x' in flags:
1046 if 'x' in flags:
1045 self.wvfs.setflags(filename, False, True)
1047 self.wvfs.setflags(filename, False, True)
1046 return len(data)
1048 return len(data)
1047
1049
    def wwritedata(self, filename, data):
        # run ``data`` through the decode filters without writing anything:
        # returns the bytes as they would appear in the working directory
        return self._filter(self._decodefilterpats, filename, data)
1050
1052
1051 def currenttransaction(self):
1053 def currenttransaction(self):
1052 """return the current transaction or None if non exists"""
1054 """return the current transaction or None if non exists"""
1053 if self._transref:
1055 if self._transref:
1054 tr = self._transref()
1056 tr = self._transref()
1055 else:
1057 else:
1056 tr = None
1058 tr = None
1057
1059
1058 if tr and tr.running():
1060 if tr and tr.running():
1059 return tr
1061 return tr
1060 return None
1062 return None
1061
1063
    def transaction(self, desc, report=None):
        """Open a new transaction named ``desc`` and return it.

        The store lock must already be held (a ProgrammingError is raised
        under the devel checks otherwise).  If a transaction is already
        running, a nested handle on it is returned instead.  ``report``
        overrides the function the transaction uses to print messages
        (defaults to ui.warn).

        Raises RepoError if an abandoned journal from an earlier,
        interrupted transaction exists.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # unique id for this transaction, passed to the pretxnopen hook
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        # journal files are renamed to their 'undo' names on success
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1148
1150
1149 def _journalfiles(self):
1151 def _journalfiles(self):
1150 return ((self.svfs, 'journal'),
1152 return ((self.svfs, 'journal'),
1151 (self.vfs, 'journal.dirstate'),
1153 (self.vfs, 'journal.dirstate'),
1152 (self.vfs, 'journal.branch'),
1154 (self.vfs, 'journal.branch'),
1153 (self.vfs, 'journal.desc'),
1155 (self.vfs, 'journal.desc'),
1154 (self.vfs, 'journal.bookmarks'),
1156 (self.vfs, 'journal.bookmarks'),
1155 (self.svfs, 'journal.phaseroots'))
1157 (self.svfs, 'journal.phaseroots'))
1156
1158
    def undofiles(self):
        # (vfs, name) pairs for the post-transaction 'undo' copies of the
        # journal files (journal.* renamed via undoname on commit)
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1159
1161
    def _writejournal(self, desc):
        # snapshot dirstate, branch, transaction description, bookmarks and
        # phase roots so that an aborted transaction can be rolled back
        # (the full file list lives in _journalfiles)
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        # first line: revision count before the transaction;
        # second line: the transaction description
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1170
1172
1171 def recover(self):
1173 def recover(self):
1172 with self.lock():
1174 with self.lock():
1173 if self.svfs.exists("journal"):
1175 if self.svfs.exists("journal"):
1174 self.ui.status(_("rolling back interrupted transaction\n"))
1176 self.ui.status(_("rolling back interrupted transaction\n"))
1175 vfsmap = {'': self.svfs,
1177 vfsmap = {'': self.svfs,
1176 'plain': self.vfs,}
1178 'plain': self.vfs,}
1177 transaction.rollback(self.svfs, vfsmap, "journal",
1179 transaction.rollback(self.svfs, vfsmap, "journal",
1178 self.ui.warn)
1180 self.ui.warn)
1179 self.invalidate()
1181 self.invalidate()
1180 return True
1182 return True
1181 else:
1183 else:
1182 self.ui.warn(_("no interrupted transaction available\n"))
1184 self.ui.warn(_("no interrupted transaction available\n"))
1183 return False
1185 return False
1184
1186
1185 def rollback(self, dryrun=False, force=False):
1187 def rollback(self, dryrun=False, force=False):
1186 wlock = lock = dsguard = None
1188 wlock = lock = dsguard = None
1187 try:
1189 try:
1188 wlock = self.wlock()
1190 wlock = self.wlock()
1189 lock = self.lock()
1191 lock = self.lock()
1190 if self.svfs.exists("undo"):
1192 if self.svfs.exists("undo"):
1191 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1193 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1192
1194
1193 return self._rollback(dryrun, force, dsguard)
1195 return self._rollback(dryrun, force, dsguard)
1194 else:
1196 else:
1195 self.ui.warn(_("no rollback information available\n"))
1197 self.ui.warn(_("no rollback information available\n"))
1196 return 1
1198 return 1
1197 finally:
1199 finally:
1198 release(dsguard, lock, wlock)
1200 release(dsguard, lock, wlock)
1199
1201
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Perform the actual rollback work for rollback().

        Caller must hold wlock and lock and pass a dirstateguard in
        ``dsguard``.  ``dryrun`` only prints what would happen;
        ``force`` skips the data-loss safety check.  Returns 0.
        """
        ui = self.ui
        try:
            # undo.desc: first line is the old revision count, second the
            # transaction description, optional third extra detail
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc missing/unreadable: older or foreign journal
            msg = _('rolling back unknown transaction\n')
            desc = None

        # refuse to roll back a commit when the working directory has
        # moved off tip: that would strip data the user may still want
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # did the rollback strip the working directory's parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            # any in-progress merge no longer makes sense: discard its state
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1270
1272
    def invalidatecaches(self):
        # drop derived in-memory caches (tags, branch heads, volatile
        # revsets) so they are recomputed from on-disk data on next access

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
1279
1281
    def invalidatevolatilesets(self):
        # drop caches that depend on the set of filtered/obsolete revisions
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1283
1285
1284 def invalidatedirstate(self):
1286 def invalidatedirstate(self):
1285 '''Invalidates the dirstate, causing the next call to dirstate
1287 '''Invalidates the dirstate, causing the next call to dirstate
1286 to check if it was modified since the last time it was read,
1288 to check if it was modified since the last time it was read,
1287 rereading it if it has.
1289 rereading it if it has.
1288
1290
1289 This is different to dirstate.invalidate() that it doesn't always
1291 This is different to dirstate.invalidate() that it doesn't always
1290 rereads the dirstate. Use dirstate.invalidate() if you want to
1292 rereads the dirstate. Use dirstate.invalidate() if you want to
1291 explicitly read the dirstate again (i.e. restoring it to a previous
1293 explicitly read the dirstate again (i.e. restoring it to a previous
1292 known good state).'''
1294 known good state).'''
1293 if hasunfilteredcache(self, 'dirstate'):
1295 if hasunfilteredcache(self, 'dirstate'):
1294 for k in self.dirstate._filecache:
1296 for k in self.dirstate._filecache:
1295 try:
1297 try:
1296 delattr(self.dirstate, k)
1298 delattr(self.dirstate, k)
1297 except AttributeError:
1299 except AttributeError:
1298 pass
1300 pass
1299 delattr(self.unfiltered(), 'dirstate')
1301 delattr(self.unfiltered(), 'dirstate')
1300
1302
1301 def invalidate(self, clearfilecache=False):
1303 def invalidate(self, clearfilecache=False):
1302 '''Invalidates both store and non-store parts other than dirstate
1304 '''Invalidates both store and non-store parts other than dirstate
1303
1305
1304 If a transaction is running, invalidation of store is omitted,
1306 If a transaction is running, invalidation of store is omitted,
1305 because discarding in-memory changes might cause inconsistency
1307 because discarding in-memory changes might cause inconsistency
1306 (e.g. incomplete fncache causes unintentional failure, but
1308 (e.g. incomplete fncache causes unintentional failure, but
1307 redundant one doesn't).
1309 redundant one doesn't).
1308 '''
1310 '''
1309 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1311 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1310 for k in self._filecache.keys():
1312 for k in self._filecache.keys():
1311 # dirstate is invalidated separately in invalidatedirstate()
1313 # dirstate is invalidated separately in invalidatedirstate()
1312 if k == 'dirstate':
1314 if k == 'dirstate':
1313 continue
1315 continue
1314
1316
1315 if clearfilecache:
1317 if clearfilecache:
1316 del self._filecache[k]
1318 del self._filecache[k]
1317 try:
1319 try:
1318 delattr(unfiltered, k)
1320 delattr(unfiltered, k)
1319 except AttributeError:
1321 except AttributeError:
1320 pass
1322 pass
1321 self.invalidatecaches()
1323 self.invalidatecaches()
1322 if not self.currenttransaction():
1324 if not self.currenttransaction():
1323 # TODO: Changing contents of store outside transaction
1325 # TODO: Changing contents of store outside transaction
1324 # causes inconsistency. We should make in-memory store
1326 # causes inconsistency. We should make in-memory store
1325 # changes detectable, and abort if changed.
1327 # changes detectable, and abort if changed.
1326 self.store.invalidatecaches()
1328 self.store.invalidatecaches()
1327
1329
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1334
1336
1335 @unfilteredmethod
1337 @unfilteredmethod
1336 def _refreshfilecachestats(self, tr):
1338 def _refreshfilecachestats(self, tr):
1337 """Reload stats of cached files so that they are flagged as valid"""
1339 """Reload stats of cached files so that they are flagged as valid"""
1338 for k, ce in self._filecache.items():
1340 for k, ce in self._filecache.items():
1339 if k == 'dirstate' or k not in self.__dict__:
1341 if k == 'dirstate' or k not in self.__dict__:
1340 continue
1342 continue
1341 ce.refresh()
1343 ce.refresh()
1342
1344
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire lock file ``lockname`` in ``vfs`` and return the lock.

        With ``wait`` false, LockHeld propagates immediately when another
        process holds the lock; otherwise the user is warned and the
        acquisition is retried with a timeout (config ui.timeout, default
        600 seconds).  ``releasefn``/``acquirefn``/``inheritchecker`` are
        forwarded to lockmod.lock; ``parentenvvar`` names an environment
        variable that may carry a parent process's lock token, enabling
        lock inheritance.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            # first attempt: zero timeout, so a held lock raises LockHeld
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1374
1376
1375 def _afterlock(self, callback):
1377 def _afterlock(self, callback):
1376 """add a callback to be run when the repository is fully unlocked
1378 """add a callback to be run when the repository is fully unlocked
1377
1379
1378 The callback will be executed when the outermost lock is released
1380 The callback will be executed when the outermost lock is released
1379 (with wlock being higher level than 'lock')."""
1381 (with wlock being higher level than 'lock')."""
1380 for ref in (self._wlockref, self._lockref):
1382 for ref in (self._wlockref, self._lockref):
1381 l = ref and ref()
1383 l = ref and ref()
1382 if l and l.held:
1384 if l and l.held:
1383 l.postrelease.append(callback)
1385 l.postrelease.append(callback)
1384 break
1386 break
1385 else: # no lock have been found.
1387 else: # no lock have been found.
1386 callback()
1388 callback()
1387
1389
1388 def lock(self, wait=True):
1390 def lock(self, wait=True):
1389 '''Lock the repository store (.hg/store) and return a weak reference
1391 '''Lock the repository store (.hg/store) and return a weak reference
1390 to the lock. Use this before modifying the store (e.g. committing or
1392 to the lock. Use this before modifying the store (e.g. committing or
1391 stripping). If you are opening a transaction, get a lock as well.)
1393 stripping). If you are opening a transaction, get a lock as well.)
1392
1394
1393 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1395 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1394 'wlock' first to avoid a dead-lock hazard.'''
1396 'wlock' first to avoid a dead-lock hazard.'''
1395 l = self._currentlock(self._lockref)
1397 l = self._currentlock(self._lockref)
1396 if l is not None:
1398 if l is not None:
1397 l.lock()
1399 l.lock()
1398 return l
1400 return l
1399
1401
1400 l = self._lock(self.svfs, "lock", wait, None,
1402 l = self._lock(self.svfs, "lock", wait, None,
1401 self.invalidate, _('repository %s') % self.origroot)
1403 self.invalidate, _('repository %s') % self.origroot)
1402 self._lockref = weakref.ref(l)
1404 self._lockref = weakref.ref(l)
1403 return l
1405 return l
1404
1406
    def _wlockchecktransaction(self):
        # used as wlock's inheritchecker: handing the wlock to a child
        # process is only safe outside a transaction, so raise if one
        # is currently open
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')
1409
1411
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        # re-entrant acquisition: bump the hold count on the live lock
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            # taking wlock after lock inverts the documented ordering and
            # risks dead-lock; warn under the devel checks
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard, mid parent-change) dirstate modifications
            # when the last reference to the wlock goes away
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1445
1447
1446 def _currentlock(self, lockref):
1448 def _currentlock(self, lockref):
1447 """Returns the lock if it's held, or None if it's not."""
1449 """Returns the lock if it's held, or None if it's not."""
1448 if lockref is None:
1450 if lockref is None:
1449 return None
1451 return None
1450 l = lockref()
1452 l = lockref()
1451 if l is None or not l.held:
1453 if l is None or not l.held:
1452 return None
1454 return None
1453 return l
1455 return l
1454
1456
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        # wlock guards the non-store files (dirstate etc.); see wlock()
        return self._currentlock(self._wlockref)
1458
1460
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1 and manifest2 are
        the manifests of the two parent changesets; linkrev is the
        changelog revision the new filelog entry will link back to; tr is
        the active transaction; changelist collects the names of files
        recorded as changed.  Returns the filelog node for this file.
        """

        fname = fctx.path()
        # parent filelog nodes for this path (nullid when absent)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        # a real filectx already has a filelog node; if it matches one of
        # the parents we can reuse the existing entry instead of re-adding
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                # a pure flags (exec/symlink) change still counts as changed
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # only the second parent knows this file; promote it to p1
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        # content unchanged: keep pointing at the first parent's node
        return fparent1
1546
1548
1547 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1549 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1548 """check for commit arguments that aren't committable"""
1550 """check for commit arguments that aren't committable"""
1549 if match.isexact() or match.prefix():
1551 if match.isexact() or match.prefix():
1550 matched = set(status.modified + status.added + status.removed)
1552 matched = set(status.modified + status.added + status.removed)
1551
1553
1552 for f in match.files():
1554 for f in match.files():
1553 f = self.dirstate.normalize(f)
1555 f = self.dirstate.normalize(f)
1554 if f == '.' or f in matched or f in wctx.substate:
1556 if f == '.' or f in matched or f in wctx.substate:
1555 continue
1557 continue
1556 if f in status.deleted:
1558 if f in status.deleted:
1557 fail(f, _('file not found!'))
1559 fail(f, _('file not found!'))
1558 if f in vdirs: # visited directory
1560 if f in vdirs: # visited directory
1559 d = f + '/'
1561 d = f + '/'
1560 for mf in matched:
1562 for mf in matched:
1561 if mf.startswith(d):
1563 if mf.startswith(d):
1562 break
1564 break
1563 else:
1565 else:
1564 fail(f, _("no match under directory!"))
1566 fail(f, _("no match under directory!"))
1565 elif f not in self.dirstate:
1567 elif f not in self.dirstate:
1566 fail(f, _("file not tracked!"))
1568 fail(f, _("file not tracked!"))
1567
1569
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit.  Raises error.Abort on uncommittable states
        (partial merge commit, unresolved merge, excluded subrepos, ...).
        """
        # NOTE: extra defaults to None (not {}) to avoid sharing a mutable
        # default dict across calls
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # vdirs collects explicitly matched directories so
            # checkcommitpatterns can validate them later
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                # precommit hook may abort the whole commit before the
                # transaction is even opened
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        # run the "commit" hook only after all locks are released
        self._afterlock(commithook)
        return ret
1729
1731
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors on individual files are reported
        and re-raised instead of being tolerated as missing files.
        Returns the new changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weakref proxy keeps helpers from extending the
            # transaction's lifetime
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # context reports the file as gone after all
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                # only keep removals that actually appear in a parent manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1823
1825
1824 @unfilteredmethod
1826 @unfilteredmethod
1825 def destroying(self):
1827 def destroying(self):
1826 '''Inform the repository that nodes are about to be destroyed.
1828 '''Inform the repository that nodes are about to be destroyed.
1827 Intended for use by strip and rollback, so there's a common
1829 Intended for use by strip and rollback, so there's a common
1828 place for anything that has to be done before destroying history.
1830 place for anything that has to be done before destroying history.
1829
1831
1830 This is mostly useful for saving state that is in memory and waiting
1832 This is mostly useful for saving state that is in memory and waiting
1831 to be flushed when the current lock is released. Because a call to
1833 to be flushed when the current lock is released. Because a call to
1832 destroyed is imminent, the repo will be invalidated causing those
1834 destroyed is imminent, the repo will be invalidated causing those
1833 changes to stay in memory (waiting for the next unlock), or vanish
1835 changes to stay in memory (waiting for the next unlock), or vanish
1834 completely.
1836 completely.
1835 '''
1837 '''
1836 # When using the same lock to commit and strip, the phasecache is left
1838 # When using the same lock to commit and strip, the phasecache is left
1837 # dirty after committing. Then when we strip, the repo is invalidated,
1839 # dirty after committing. Then when we strip, the repo is invalidated,
1838 # causing those changes to disappear.
1840 # causing those changes to disappear.
1839 if '_phasecache' in vars(self):
1841 if '_phasecache' in vars(self):
1840 self._phasecache.write()
1842 self._phasecache.write()
1841
1843
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        The call order below matters: the phasecache is filtered and
        written before caches are refreshed and the repo is invalidated.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1875
1877
1876 def walk(self, match, node=None):
1878 def walk(self, match, node=None):
1877 '''
1879 '''
1878 walk recursively through the directory tree or a given
1880 walk recursively through the directory tree or a given
1879 changeset, finding all files matched by the match
1881 changeset, finding all files matched by the match
1880 function
1882 function
1881 '''
1883 '''
1882 return self[node].walk(match)
1884 return self[node].walk(match)
1883
1885
1884 def status(self, node1='.', node2=None, match=None,
1886 def status(self, node1='.', node2=None, match=None,
1885 ignored=False, clean=False, unknown=False,
1887 ignored=False, clean=False, unknown=False,
1886 listsubrepos=False):
1888 listsubrepos=False):
1887 '''a convenience method that calls node1.status(node2)'''
1889 '''a convenience method that calls node1.status(node2)'''
1888 return self[node1].status(node2, match, ignored, clean, unknown,
1890 return self[node1].status(node2, match, ignored, clean, unknown,
1889 listsubrepos)
1891 listsubrepos)
1890
1892
1891 def heads(self, start=None):
1893 def heads(self, start=None):
1892 if start is None:
1894 if start is None:
1893 cl = self.changelog
1895 cl = self.changelog
1894 headrevs = reversed(cl.headrevs())
1896 headrevs = reversed(cl.headrevs())
1895 return [cl.node(rev) for rev in headrevs]
1897 return [cl.node(rev) for rev in headrevs]
1896
1898
1897 heads = self.changelog.heads(start)
1899 heads = self.changelog.heads(start)
1898 # sort the output in rev descending order
1900 # sort the output in rev descending order
1899 return sorted(heads, key=self.changelog.rev, reverse=True)
1901 return sorted(heads, key=self.changelog.rev, reverse=True)
1900
1902
1901 def branchheads(self, branch=None, start=None, closed=False):
1903 def branchheads(self, branch=None, start=None, closed=False):
1902 '''return a (possibly filtered) list of heads for the given branch
1904 '''return a (possibly filtered) list of heads for the given branch
1903
1905
1904 Heads are returned in topological order, from newest to oldest.
1906 Heads are returned in topological order, from newest to oldest.
1905 If branch is None, use the dirstate branch.
1907 If branch is None, use the dirstate branch.
1906 If start is not None, return only heads reachable from start.
1908 If start is not None, return only heads reachable from start.
1907 If closed is True, return heads that are marked as closed as well.
1909 If closed is True, return heads that are marked as closed as well.
1908 '''
1910 '''
1909 if branch is None:
1911 if branch is None:
1910 branch = self[None].branch()
1912 branch = self[None].branch()
1911 branches = self.branchmap()
1913 branches = self.branchmap()
1912 if branch not in branches:
1914 if branch not in branches:
1913 return []
1915 return []
1914 # the cache returns heads ordered lowest to highest
1916 # the cache returns heads ordered lowest to highest
1915 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1917 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1916 if start is not None:
1918 if start is not None:
1917 # filter out the heads that cannot be reached from startrev
1919 # filter out the heads that cannot be reached from startrev
1918 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1920 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1919 bheads = [h for h in bheads if h in fbheads]
1921 bheads = [h for h in bheads if h in fbheads]
1920 return bheads
1922 return bheads
1921
1923
1922 def branches(self, nodes):
1924 def branches(self, nodes):
1923 if not nodes:
1925 if not nodes:
1924 nodes = [self.changelog.tip()]
1926 nodes = [self.changelog.tip()]
1925 b = []
1927 b = []
1926 for n in nodes:
1928 for n in nodes:
1927 t = n
1929 t = n
1928 while True:
1930 while True:
1929 p = self.changelog.parents(n)
1931 p = self.changelog.parents(n)
1930 if p[1] != nullid or p[0] == nullid:
1932 if p[1] != nullid or p[0] == nullid:
1931 b.append((t, n, p[0], p[1]))
1933 b.append((t, n, p[0], p[1]))
1932 break
1934 break
1933 n = p[0]
1935 n = p[0]
1934 return b
1936 return b
1935
1937
1936 def between(self, pairs):
1938 def between(self, pairs):
1937 r = []
1939 r = []
1938
1940
1939 for top, bottom in pairs:
1941 for top, bottom in pairs:
1940 n, l, i = top, [], 0
1942 n, l, i = top, [], 0
1941 f = 1
1943 f = 1
1942
1944
1943 while n != bottom and n != nullid:
1945 while n != bottom and n != nullid:
1944 p = self.changelog.parents(n)[0]
1946 p = self.changelog.parents(n)[0]
1945 if i == f:
1947 if i == f:
1946 l.append(n)
1948 l.append(n)
1947 f = f * 2
1949 f = f * 2
1948 n = p
1950 n = p
1949 i += 1
1951 i += 1
1950
1952
1951 r.append(l)
1953 r.append(l)
1952
1954
1953 return r
1955 return r
1954
1956
1955 def checkpush(self, pushop):
1957 def checkpush(self, pushop):
1956 """Extensions can override this function if additional checks have
1958 """Extensions can override this function if additional checks have
1957 to be performed before pushing, or call it if they override push
1959 to be performed before pushing, or call it if they override push
1958 command.
1960 command.
1959 """
1961 """
1960 pass
1962 pass
1961
1963
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.

        Cached on the unfiltered repo: the same util.hooks container is
        returned on every access, so extensions can register callbacks
        once and have them fire for each subsequent push.
        """
        return util.hooks()
1968
1970
1969 def pushkey(self, namespace, key, old, new):
1971 def pushkey(self, namespace, key, old, new):
1970 try:
1972 try:
1971 tr = self.currenttransaction()
1973 tr = self.currenttransaction()
1972 hookargs = {}
1974 hookargs = {}
1973 if tr is not None:
1975 if tr is not None:
1974 hookargs.update(tr.hookargs)
1976 hookargs.update(tr.hookargs)
1975 hookargs['namespace'] = namespace
1977 hookargs['namespace'] = namespace
1976 hookargs['key'] = key
1978 hookargs['key'] = key
1977 hookargs['old'] = old
1979 hookargs['old'] = old
1978 hookargs['new'] = new
1980 hookargs['new'] = new
1979 self.hook('prepushkey', throw=True, **hookargs)
1981 self.hook('prepushkey', throw=True, **hookargs)
1980 except error.HookAbort as exc:
1982 except error.HookAbort as exc:
1981 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1983 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1982 if exc.hint:
1984 if exc.hint:
1983 self.ui.write_err(_("(%s)\n") % exc.hint)
1985 self.ui.write_err(_("(%s)\n") % exc.hint)
1984 return False
1986 return False
1985 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1987 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1986 ret = pushkey.push(self, namespace, key, old, new)
1988 ret = pushkey.push(self, namespace, key, old, new)
1987 def runhook():
1989 def runhook():
1988 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1990 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1989 ret=ret)
1991 ret=ret)
1990 self._afterlock(runhook)
1992 self._afterlock(runhook)
1991 return ret
1993 return ret
1992
1994
1993 def listkeys(self, namespace):
1995 def listkeys(self, namespace):
1994 self.hook('prelistkeys', throw=True, namespace=namespace)
1996 self.hook('prelistkeys', throw=True, namespace=namespace)
1995 self.ui.debug('listing keys for "%s"\n' % namespace)
1997 self.ui.debug('listing keys for "%s"\n' % namespace)
1996 values = pushkey.list(self, namespace)
1998 values = pushkey.list(self, namespace)
1997 self.hook('listkeys', namespace=namespace, values=values)
1999 self.hook('listkeys', namespace=namespace, values=values)
1998 return values
2000 return values
1999
2001
2000 def debugwireargs(self, one, two, three=None, four=None, five=None):
2002 def debugwireargs(self, one, two, three=None, four=None, five=None):
2001 '''used to test argument passing over the wire'''
2003 '''used to test argument passing over the wire'''
2002 return "%s %s %s %s %s" % (one, two, three, four, five)
2004 return "%s %s %s %s %s" % (one, two, three, four, five)
2003
2005
2004 def savecommitmessage(self, text):
2006 def savecommitmessage(self, text):
2005 fp = self.vfs('last-message.txt', 'wb')
2007 fp = self.vfs('last-message.txt', 'wb')
2006 try:
2008 try:
2007 fp.write(text)
2009 fp.write(text)
2008 finally:
2010 finally:
2009 fp.close()
2011 fp.close()
2010 return self.pathto(fp.name[len(self.root) + 1:])
2012 return self.pathto(fp.name[len(self.root) + 1:])
2011
2013
# used to avoid circular references so destructors work
def aftertrans(files):
    """Build a callback that moves journal files into place.

    *files* is an iterable of (vfs, src, dest) triples.  The returned
    callable renames each src to dest, removing any pre-existing dest
    first and tolerating a missing src (journal not written yet).
    """
    pending = [tuple(entry) for entry in files]

    def renameall():
        for vfs, src, dest in pending:
            try:
                # if src and dest refer to a same file, vfs.rename is a
                # no-op, leaving both src and dest on disk. delete dest
                # to make sure the rename couldn't be such a no-op.
                vfs.unlink(dest)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
2030
2032
def undoname(fn):
    """Map a journal file path to its undo counterpart.

    E.g. '.hg/journal.dirstate' -> '.hg/undo.dirstate'.  The basename
    must start with 'journal'.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2035
2037
def instance(ui, path, create):
    """Open (or create) the local repository addressed by *path*.

    *path* may be a local URL; it is converted to a filesystem path
    before constructing the localrepository.
    """
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2038
2040
def islocal(path):
    """Report that this repository type is local (peer API)."""
    return True
2041
2043
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    reqs = set(['revlogv1'])
    # store-layout features nest: fncache needs store, dotencode needs
    # fncache
    if ui.configbool('format', 'usestore', True):
        reqs.add('store')
        if ui.configbool('format', 'usefncache', True):
            reqs.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                reqs.add('dotencode')

    engine = ui.config('experimental', 'format.compression', 'zlib')
    if engine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          engine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if engine != 'zlib':
        reqs.add('exp-compression-%s' % engine)

    if scmutil.gdinitconfig(ui):
        reqs.add('generaldelta')
    # opt-in experimental manifest formats
    for setting, requirement in [('treemanifest', 'treemanifest'),
                                 ('manifestv2', 'manifestv2')]:
        if ui.configbool('experimental', setting, False):
            reqs.add(requirement)

    return reqs
General Comments 0
You need to be logged in to leave comments. Login now