localrepo: deprecated 'repo.wopener' (API)...
Pierre-Yves David
r31145:11a97785 default
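The change below removes the `self.wopener = self.wvfs` alias from `localrepository.__init__` and replaces it with a class-level property that calls `ui.deprecwarn` before delegating to `repo.wvfs`, so old callers keep working but see a deprecation warning (the '4.2' argument names the release after which compatibility may be dropped). A minimal standalone sketch of the same property-based deprecation pattern, using the stdlib `warnings` module as a stand-in for Mercurial's `ui.deprecwarn`:

import warnings

class Repo(object):
    def __init__(self):
        self.wvfs = object()  # stand-in for the working-directory vfs

    @property
    def wopener(self):
        # legacy alias: warn, then delegate to the new attribute
        warnings.warn("use 'repo.wvfs' instead of 'repo.wopener'",
                      DeprecationWarning, stacklevel=2)
        return self.wvfs

repo = Repo()
assert repo.wopener is repo.wvfs  # old readers still work, with a warning

Because the property lives on the class, every read of `repo.wopener` is intercepted; the instance attribute assignment has to go at the same time, since assigning through a getter-only property raises `AttributeError`.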
@@ -1,2057 +1,2061 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

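The helpers above (`repofilecache`, `unfilteredpropertycache`, `unfilteredmethod`) all funnel attribute access or method calls through `repo.unfiltered()`, so cached state is stored once on the unfiltered repository even when it is reached through a filtered view. A toy, self-contained sketch of that redirection (the `baserepo` and `filteredview` names are illustrative, not Mercurial APIs; the decorator is copied from above):

def unfilteredmethod(orig):
    # always run the wrapped method against the unfiltered repository
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

class baserepo(object):
    def unfiltered(self):
        return self

    @unfilteredmethod
    def whoami(self):
        return self

class filteredview(baserepo):
    def __init__(self, unfi):
        self._unfi = unfi
    def unfiltered(self):
        return self._unfi

unfi = baserepo()
view = filteredview(unfi)
assert view.whoami() is unfi  # the call was redirected to the unfiltered repo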
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        # vfs to access the working copy
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        # vfs to access the content of the repository
        self.vfs = None
        # vfs to access the store part of the repository
        self.svfs = None
-        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = scmutil.vfs(sharedpath, realpath=True)

            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

+    @property
+    def wopener(self):
+        self.ui.deprecwarn("use 'repo.wvfs' instead of 'repo.wopener'", '4.2')
+        return self.wvfs
+
    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if txnutil.mayhavepending(self.root):
            c.readpending('00changelog.i.a')
        return c

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

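The dunder methods above give the repository a container-like interface over its changelog. A short illustrative sketch (it assumes `repo` is an already-open localrepository and `node` a known changeset node; neither is defined in this diff):

ctx = repo[0]          # changectx for revision 0 (__getitem__)
wctx = repo[None]      # workingctx for the working directory
tip = repo['tip']      # symbolic names resolve through changectx
exists = node in repo  # __contains__ swallows RepoLookupError
numrevs = len(repo)    # __len__ is the changelog length
for rev in repo:       # __iter__ yields integer revisions
    pass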
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

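A hedged usage sketch of the three lookup methods documented above; it assumes an in-process repository handle opened with `mercurial.hg.repository` (the API of this era) and a Mercurial repository in the current directory:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')  # assumes '.' is a Mercurial repo
# revs() escapes its arguments via revsetlang.formatspec: %d is one rev
revs = repo.revs('%d::', 0)            # smartset of integer revisions
# set() wraps revs() and yields changectx objects
for ctx in repo.set('head() and not closed()'):
    print(ctx.rev())
# anyrevs() matches the union of several specs; user=True also expands
# [revsetalias] definitions from the configuration
union = repo.anyrevs(['draft()', 'secret()'], user=True)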
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

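Per the docstring above, a hypothetical call that adds a local tag (stored in `.hg/localtags`, so no changeset is committed); the tag name, message, and user value are illustrative, and `repo` is assumed to be an open localrepository:

node = repo['tip'].node()
repo.tag('snapshot-2017', node, 'mark current tip', True,
         'Example User <user@example.com>', None)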
739 @filteredpropertycache
743 @filteredpropertycache
740 def _tagscache(self):
744 def _tagscache(self):
741 '''Returns a tagscache object that contains various tags related
745 '''Returns a tagscache object that contains various tags related
742 caches.'''
746 caches.'''
743
747
744 # This simplifies its cache management by having one decorated
748 # This simplifies its cache management by having one decorated
745 # function (this one) and the rest simply fetch things from it.
749 # function (this one) and the rest simply fetch things from it.
746 class tagscache(object):
750 class tagscache(object):
747 def __init__(self):
751 def __init__(self):
748 # These two define the set of tags for this repository. tags
752 # These two define the set of tags for this repository. tags
749 # maps tag name to node; tagtypes maps tag name to 'global' or
753 # maps tag name to node; tagtypes maps tag name to 'global' or
750 # 'local'. (Global tags are defined by .hgtags across all
754 # 'local'. (Global tags are defined by .hgtags across all
751 # heads, and local tags are defined in .hg/localtags.)
755 # heads, and local tags are defined in .hg/localtags.)
752 # They constitute the in-memory cache of tags.
756 # They constitute the in-memory cache of tags.
753 self.tags = self.tagtypes = None
757 self.tags = self.tagtypes = None
754
758
755 self.nodetagscache = self.tagslist = None
759 self.nodetagscache = self.tagslist = None
756
760
757 cache = tagscache()
761 cache = tagscache()
758 cache.tags, cache.tagtypes = self._findtags()
762 cache.tags, cache.tagtypes = self._findtags()
759
763
760 return cache
764 return cache
761
765
762 def tags(self):
766 def tags(self):
763 '''return a mapping of tag to node'''
767 '''return a mapping of tag to node'''
764 t = {}
768 t = {}
765 if self.changelog.filteredrevs:
769 if self.changelog.filteredrevs:
766 tags, tt = self._findtags()
770 tags, tt = self._findtags()
767 else:
771 else:
768 tags = self._tagscache.tags
772 tags = self._tagscache.tags
769 for k, v in tags.iteritems():
773 for k, v in tags.iteritems():
770 try:
774 try:
771 # ignore tags to unknown nodes
775 # ignore tags to unknown nodes
772 self.changelog.rev(v)
776 self.changelog.rev(v)
773 t[k] = v
777 t[k] = v
774 except (error.LookupError, ValueError):
778 except (error.LookupError, ValueError):
775 pass
779 pass
776 return t
780 return t
777
781
778 def _findtags(self):
782 def _findtags(self):
779 '''Do the hard work of finding tags. Return a pair of dicts
783 '''Do the hard work of finding tags. Return a pair of dicts
780 (tags, tagtypes) where tags maps tag name to node, and tagtypes
784 (tags, tagtypes) where tags maps tag name to node, and tagtypes
781 maps tag name to a string like \'global\' or \'local\'.
785 maps tag name to a string like \'global\' or \'local\'.
782 Subclasses or extensions are free to add their own tags, but
786 Subclasses or extensions are free to add their own tags, but
783 should be aware that the returned dicts will be retained for the
787 should be aware that the returned dicts will be retained for the
784 duration of the localrepo object.'''
788 duration of the localrepo object.'''
785
789
786 # XXX what tagtype should subclasses/extensions use? Currently
790 # XXX what tagtype should subclasses/extensions use? Currently
787 # mq and bookmarks add tags, but do not set the tagtype at all.
791 # mq and bookmarks add tags, but do not set the tagtype at all.
788 # Should each extension invent its own tag type? Should there
792 # Should each extension invent its own tag type? Should there
789 # be one tagtype for all such "virtual" tags? Or is the status
793 # be one tagtype for all such "virtual" tags? Or is the status
790 # quo fine?
794 # quo fine?
791
795
792 alltags = {} # map tag name to (node, hist)
796 alltags = {} # map tag name to (node, hist)
793 tagtypes = {}
797 tagtypes = {}
794
798
795 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
799 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
796 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
800 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
797
801
798 # Build the return dicts. Have to re-encode tag names because
802 # Build the return dicts. Have to re-encode tag names because
799 # the tags module always uses UTF-8 (in order not to lose info
803 # the tags module always uses UTF-8 (in order not to lose info
800 # writing to the cache), but the rest of Mercurial wants them in
804 # writing to the cache), but the rest of Mercurial wants them in
801 # local encoding.
805 # local encoding.
802 tags = {}
806 tags = {}
803 for (name, (node, hist)) in alltags.iteritems():
807 for (name, (node, hist)) in alltags.iteritems():
804 if node != nullid:
808 if node != nullid:
805 tags[encoding.tolocal(name)] = node
809 tags[encoding.tolocal(name)] = node
806 tags['tip'] = self.changelog.tip()
810 tags['tip'] = self.changelog.tip()
807 tagtypes = dict([(encoding.tolocal(name), value)
811 tagtypes = dict([(encoding.tolocal(name), value)
808 for (name, value) in tagtypes.iteritems()])
812 for (name, value) in tagtypes.iteritems()])
809 return (tags, tagtypes)
813 return (tags, tagtypes)
810
814
811 def tagtype(self, tagname):
815 def tagtype(self, tagname):
812 '''
816 '''
813 return the type of the given tag. result can be:
817 return the type of the given tag. result can be:
814
818
815 'local' : a local tag
819 'local' : a local tag
816 'global' : a global tag
820 'global' : a global tag
817 None : tag does not exist
821 None : tag does not exist
818 '''
822 '''
819
823
820 return self._tagscache.tagtypes.get(tagname)
824 return self._tagscache.tagtypes.get(tagname)
821
825
822 def tagslist(self):
826 def tagslist(self):
823 '''return a list of tags ordered by revision'''
827 '''return a list of tags ordered by revision'''
824 if not self._tagscache.tagslist:
828 if not self._tagscache.tagslist:
825 l = []
829 l = []
826 for t, n in self.tags().iteritems():
830 for t, n in self.tags().iteritems():
827 l.append((self.changelog.rev(n), t, n))
831 l.append((self.changelog.rev(n), t, n))
828 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
832 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
829
833
830 return self._tagscache.tagslist
834 return self._tagscache.tagslist
831
835
832 def nodetags(self, node):
836 def nodetags(self, node):
833 '''return the tags associated with a node'''
837 '''return the tags associated with a node'''
834 if not self._tagscache.nodetagscache:
838 if not self._tagscache.nodetagscache:
835 nodetagscache = {}
839 nodetagscache = {}
836 for t, n in self._tagscache.tags.iteritems():
840 for t, n in self._tagscache.tags.iteritems():
837 nodetagscache.setdefault(n, []).append(t)
841 nodetagscache.setdefault(n, []).append(t)
838 for tags in nodetagscache.itervalues():
842 for tags in nodetagscache.itervalues():
839 tags.sort()
843 tags.sort()
840 self._tagscache.nodetagscache = nodetagscache
844 self._tagscache.nodetagscache = nodetagscache
841 return self._tagscache.nodetagscache.get(node, [])
845 return self._tagscache.nodetagscache.get(node, [])
842
846
843 def nodebookmarks(self, node):
847 def nodebookmarks(self, node):
844 """return the list of bookmarks pointing to the specified node"""
848 """return the list of bookmarks pointing to the specified node"""
845 marks = []
849 marks = []
846 for bookmark, n in self._bookmarks.iteritems():
850 for bookmark, n in self._bookmarks.iteritems():
847 if n == node:
851 if n == node:
848 marks.append(bookmark)
852 marks.append(bookmark)
849 return sorted(marks)
853 return sorted(marks)
850
854
851 def branchmap(self):
855 def branchmap(self):
852 '''returns a dictionary {branch: [branchheads]} with branchheads
856 '''returns a dictionary {branch: [branchheads]} with branchheads
853 ordered by increasing revision number'''
857 ordered by increasing revision number'''
854 branchmap.updatecache(self)
858 branchmap.updatecache(self)
855 return self._branchcaches[self.filtername]
859 return self._branchcaches[self.filtername]
856
860
857 @unfilteredmethod
861 @unfilteredmethod
858 def revbranchcache(self):
862 def revbranchcache(self):
859 if not self._revbranchcache:
863 if not self._revbranchcache:
860 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
864 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
861 return self._revbranchcache
865 return self._revbranchcache
862
866
863 def branchtip(self, branch, ignoremissing=False):
867 def branchtip(self, branch, ignoremissing=False):
864 '''return the tip node for a given branch
868 '''return the tip node for a given branch
865
869
866 If ignoremissing is True, then this method will not raise an error.
870 If ignoremissing is True, then this method will not raise an error.
867 This is helpful for callers that only expect None for a missing branch
871 This is helpful for callers that only expect None for a missing branch
868 (e.g. namespace).
872 (e.g. namespace).
869
873
870 '''
874 '''
871 try:
875 try:
872 return self.branchmap().branchtip(branch)
876 return self.branchmap().branchtip(branch)
873 except KeyError:
877 except KeyError:
874 if not ignoremissing:
878 if not ignoremissing:
875 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
879 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
876 else:
880 else:
877 pass
881 pass
878
882
879 def lookup(self, key):
883 def lookup(self, key):
880 return self[key].node()
884 return self[key].node()
881
885
882 def lookupbranch(self, key, remote=None):
886 def lookupbranch(self, key, remote=None):
883 repo = remote or self
887 repo = remote or self
884 if key in repo.branchmap():
888 if key in repo.branchmap():
885 return key
889 return key
886
890
887 repo = (remote and remote.local()) and remote or self
891 repo = (remote and remote.local()) and remote or self
888 return repo[key].branch()
892 return repo[key].branch()
889
893
890 def known(self, nodes):
894 def known(self, nodes):
891 cl = self.changelog
895 cl = self.changelog
892 nm = cl.nodemap
896 nm = cl.nodemap
893 filtered = cl.filteredrevs
897 filtered = cl.filteredrevs
894 result = []
898 result = []
895 for n in nodes:
899 for n in nodes:
896 r = nm.get(n)
900 r = nm.get(n)
897 resp = not (r is None or r in filtered)
901 resp = not (r is None or r in filtered)
898 result.append(resp)
902 result.append(resp)
899 return result
903 return result
900
904
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

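    # For context, a hedged sketch of the hgrc configuration that feeds
    # _loadfilter(): the section name is the filter kind ('encode' or
    # 'decode'), each key is a file pattern, and each value is either a
    # shell command or a registered data-filter name (see adddatafilter()).
    # The commands below are placeholders, not a recommendation.
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
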
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

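    # A round-trip sketch (illustrative only): wread() applies the 'encode'
    # filters to working-directory bytes, while wwrite() applies the
    # 'decode' filters before writing, so repository data stays canonical.
    #
    #   data = repo.wread('foo.txt')             # filtered for storage
    #   repo.wwrite('foo.txt', data, '')         # ''  = regular file
    #   repo.wwrite('bin/tool', data, 'x')       # 'x' = executable flag
    #   repo.wwrite('alias', 'target', 'l')      # 'l' = symlink to 'target'
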
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

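    # Typical calling pattern (a sketch, mirroring how commit() below uses
    # it; `repo` is assumed to be an existing localrepository): a
    # transaction must be opened while the store lock is held, and nests
    # automatically if one is already running.
    #
    #   wlock = lock = tr = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       tr = repo.transaction('example')
    #       ...                          # write store data through tr
    #       tr.close()                   # commit the transaction
    #   finally:
    #       lockmod.release(tr, lock, wlock)   # rollback if not closed
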
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

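    # Command-level equivalents, for orientation: recover() backs the
    # 'hg recover' command, while rollback() backs 'hg rollback'.
    #
    #   $ hg rollback --dry-run   # preview what would be undone
    #   $ hg rollback -f          # skip the checked-out-commit safety check
    #   $ hg recover              # clean up an interrupted transaction
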
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

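    # Sketch of the callback contract (names are illustrative): work that
    # must observe fully committed state is deferred until the outermost
    # lock drops; with no lock held, the callback fires immediately.
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)
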
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

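    # Ordering sketch (illustrative): take 'wlock' before 'lock' and release
    # in the reverse order, as the docstrings above require.
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           ...                  # touch .hg/ and .hg/store safely
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
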
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

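    # For reference, the copy information built above is recorded in the
    # filelog revision's metadata, roughly of the shape (values here are
    # purely illustrative):
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node of foo>'}
    #
    # with fparent1 set to nullid so that readers know to consult the copy
    # data instead of the linear parent.
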
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped before
            # the hook fires
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

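    # A minimal caller's sketch (names assumed, not upstream code): commit
    # everything modified in the working directory with a fixed message.
    #
    #   node = repo.commit(text='example commit', user='alice <a@b.c>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
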
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # move the new commit into its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

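    # Example (illustrative, not part of localrepo.py): 'pretxncommit' above
    # runs while the transaction is still open, so a truthy return value from
    # an in-process hook aborts the commit; the names below are hypothetical.
    #
    #   def rejectempty(ui, repo, node=None, **kwargs):
    #       if not repo[node].description().strip():
    #           ui.warn('commit message must not be empty\n')
    #           return True  # abort the transaction
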
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

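    # Example (illustrative): walking the tracked files of the working
    # directory with a matcher; the 'glob:**.py' pattern is just a sample.
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write('%s\n' % f)
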
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

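    # Example (illustrative): comparing the working directory against its
    # first parent; the returned object exposes lists such as .modified,
    # .added, .removed and (when requested) .ignored.
    #
    #   st = repo.status(ignored=True)
    #   for f in st.modified:
    #       repo.ui.write('M %s\n' % f)
    #   for f in st.ignored:
    #       repo.ui.write('I %s\n' % f)
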
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

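    # Example (illustrative): printing the open heads of the 'default'
    # branch, newest first; `short` here is assumed to be the abbreviated
    # node helper from mercurial.node.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % short(h))
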
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

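    # Note on the sampling above (illustrative): walking first parents from
    # `top` toward `bottom`, a node is reported only when the step count i
    # equals f, and f doubles after each report, i.e. nodes at distances
    # 1, 2, 4, 8, ... from top. Each (top, bottom) pair therefore yields
    # O(log n) nodes, which keeps wire-protocol 'between' queries cheap.
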
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

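    # Example (illustrative): an extension can veto pushes by subclassing
    # the repo in its reposetup(); the class name and policy below are
    # hypothetical.
    #
    #   def reposetup(ui, repo):
    #       if not repo.local():
    #           return
    #       class vetopushrepo(repo.__class__):
    #           def checkpush(self, pushop):
    #               super(vetopushrepo, self).checkpush(pushop)
    #               if pushop.force:
    #                   raise error.Abort('forced pushes are disabled here')
    #       repo.__class__ = vetopushrepo
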
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing) before changesets are pushed.
        """
        return util.hooks()

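    # Example (illustrative): registering a prepushoutgoing hook; util.hooks
    # exposes add(source, hook), and each hook receives the pushop.
    #
    #   def logoutgoing(pushop):
    #       pushop.repo.ui.status('pushing %d changesets\n'
    #                             % len(pushop.outgoing.missing))
    #   repo.prepushoutgoinghooks.add('myext', logoutgoing)
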
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

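    # Example (illustrative): listing a pushkey namespace; 'bookmarks' and
    # 'phases' are commonly available namespaces.
    #
    #   for name, value in sorted(repo.listkeys('bookmarks').items()):
    #       repo.ui.write('%s = %s\n' % (name, value))
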
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

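    # Example (illustrative): the returned path is relative to the current
    # working directory, typically pointing at .hg/last-message.txt.
    #
    #   msgfn = repo.savecommitmessage('WIP: draft message\n')
    #   repo.ui.status('message saved in %s\n' % msgfn)
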
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

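# Example (illustrative): undoname maps journal files to their undo
# counterparts, e.g. undoname('.hg/journal.bookmarks') returns
# '.hg/undo.bookmarks'.
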
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
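
# Example (illustrative, not part of localrepo.py): an extension adding a
# custom requirement to newly created repositories, as the docstring above
# suggests; the extension name, config knob and requirement string are
# hypothetical.
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       if repo.ui.configbool('myext', 'enabled'):
#           reqs.add('exp-myfeature')
#       return reqs
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)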